Resolve restrictions of BufferContents, add support for allocating all types of buffers, and rework vulkano-shaders (#2132)

* Add `BufferContentsLayout`

* Rework `BufferContents`

* Add `BufferContents` derive macro

* Relax restrictions of shader code generation

* Fix examples

* Add additional invariant to `Subbuffer`

* Shorten paths a bit

* Remove a bit of bloat

* Add `Sized` constraint to element traits

* Fix an oopsie

* Add `Aligned`

* Add support for deriving `BufferContents` for sized types

* Fix alignment and padding issues

* Fix docs and add examples for `BufferContents`

* Adjust shader macro

* Add tests

* Adjust `Vertex` example

* Remove bytemuck re-export

* Update examples

* Work around bytemuck's limitation with array elements that are `AnyBitPattern`

* Add more alignments

* Fix an earlier oopsie

* Rework vulkano-shaders

* Fix examples

* Fix some rogue tabs in examples

* Add `AsRef` and `AsMut` implementations for `Padded`

* Remove useless code duplication

* Make the `BufferContents` derive macro the same for all types

* Add example docs for `Padded`

* Work around trivial bounds

* More example docs

* Minor consistency adjustment

* Add `serde` to the list of cargo features

* Make clippy happy again
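
As a rough illustration of the items above, the sketch below shows how the updated examples in this commit use the new `BufferContents` derive together with the `Vertex` derive, and how shader-generated interface types are now referenced at the module root (`fs::PushConstants` instead of `fs::ty::PushConstants`). This is a minimal sketch pieced together from the diffs that follow; the struct name and the GLSL source are illustrative placeholders, not part of the commit.

```rust
use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};

// Vertex types now derive `BufferContents` directly instead of relying on
// bytemuck's `Pod`/`Zeroable` derives, matching the updated examples below.
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct MyVertex {
    #[format(R32G32_SFLOAT)]
    position: [f32; 2],
}

// Types generated by `vulkano_shaders::shader!` are now exposed at the module
// root, e.g. `fs::PushConstants` rather than `fs::ty::PushConstants`.
// The shader source here is a placeholder for illustration only.
mod fs {
    vulkano_shaders::shader! {
        ty: "fragment",
        src: r"
            #version 450

            layout(push_constant) uniform PushConstants {
                vec4 color;
            } push_constants;

            layout(location = 0) out vec4 f_color;

            void main() {
                f_color = push_constants.color;
            }
        ",
    }
}
```

Because the derive replaces the previous reliance on re-exported bytemuck traits, the example manifests below also drop their explicit `bytemuck` dependency.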
marc0246 2023-03-05 19:56:35 +01:00 committed by GitHub
parent 4f03fe55ac
commit baf863ce33
81 changed files with 5187 additions and 4033 deletions


@ -7,7 +7,7 @@ publish = false
[dependencies]
# The `vulkano` crate is the main crate that you must use to use Vulkan.
vulkano = { path = "../vulkano" }
vulkano = { path = "../vulkano", features = ["serde"] }
# Provides the `shader!` macro that is used to generate code for using shaders.
vulkano-shaders = { path = "../vulkano-shaders" }
# The Vulkan library doesn't provide any functionality to create and handle windows, as
@ -18,10 +18,9 @@ winit = "0.27"
vulkano-win = { path = "../vulkano-win" }
vulkano-util = { path = "../vulkano-util" }
bytemuck = { version = "1.7", features = ["derive", "extern_crate_std", "min_const_generics"] }
cgmath = "0.18"
png = "0.17"
serde = { version = "1.0", features = ["derive"] }
ron = "0.8"
rand = "0.8.4"
glium = "0.32.1"
png = "0.17"
rand = "0.8.4"
ron = "0.8"
serde = { version = "1.0", features = ["derive"] }


@ -38,7 +38,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -55,8 +54,8 @@ fn main() {
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter_map(|p| {
// The Vulkan specs guarantee that a compliant implementation must provide at least one queue
// that supports compute operations.
// The Vulkan specs guarantee that a compliant implementation must provide at least one
// queue that supports compute operations.
p.queue_family_properties()
.iter()
.position(|q| q.queue_flags.intersects(QueueFlags::COMPUTE))
@ -75,7 +74,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
// Now initializing the device.
@ -99,17 +98,17 @@ fn main() {
// Now let's get to the actual example.
//
// What we are going to do is very basic: we are going to fill a buffer with 64k integers
// and ask the GPU to multiply each of them by 12.
// What we are going to do is very basic: we are going to fill a buffer with 64k integers and
// ask the GPU to multiply each of them by 12.
//
// GPUs are very good at parallel computations (SIMD-like operations), and thus will do this
// much more quickly than a CPU would do. While a CPU would typically multiply them one by one
// or four by four, a GPU will do it by groups of 32 or 64.
//
// Note however that in a real-life situation for such a simple operation the cost of
// accessing memory usually outweighs the benefits of a faster calculation. Since both the CPU
// and the GPU will need to access data, there is no other choice but to transfer the data
// through the slow PCI express bus.
// Note however that in a real-life situation for such a simple operation the cost of accessing
// memory usually outweighs the benefits of a faster calculation. Since both the CPU and the
// GPU will need to access data, there is no other choice but to transfer the data through the
// slow PCI express bus.
// We need to create the compute pipeline that describes our operation.
//
@ -119,23 +118,24 @@ fn main() {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
uint idx = gl_GlobalInvocationID.x;
data.data[idx] *= 12;
data[idx] *= 12;
}
"
",
}
}
let shader = cs::load(device.clone()).unwrap();
ComputePipeline::new(
device.clone(),
shader.entry_point("main").unwrap(),
@ -152,20 +152,16 @@ fn main() {
StandardCommandBufferAllocator::new(device.clone(), Default::default());
// We start by creating the buffer that will store the data.
let data_buffer = {
// Iterator that produces the data.
let data_iter = 0..65536u32;
// Builds the buffer and fills it with this iterator.
Buffer::from_iter(
let data_buffer = Buffer::from_iter(
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::STORAGE_BUFFER,
..Default::default()
},
data_iter,
// Iterator that produces the data.
0..65536u32,
)
.unwrap()
};
.unwrap();
// In order to let the shader access the buffer, we need to build a *descriptor set* that
// contains the buffer.
@ -191,13 +187,13 @@ fn main() {
)
.unwrap();
builder
// The command buffer only does one thing: execute the compute pipeline.
// This is called a *dispatch* operation.
// The command buffer only does one thing: execute the compute pipeline. This is called a
// *dispatch* operation.
//
// Note that we clone the pipeline and the set. Since they are both wrapped around an
// `Arc`, this only clones the `Arc` and not the whole pipeline or set (which aren't
// cloneable anyway). In this example we would avoid cloning them since this is the last
// time we use them, but in a real code you would probably need to clone them.
// Note that we clone the pipeline and the set. Since they are both wrapped in an `Arc`,
// this only clones the `Arc` and not the whole pipeline or set (which aren't cloneable
// anyway). In this example we would avoid cloning them since this is the last time we use
// them, but in real code you would probably need to clone them.
.bind_pipeline_compute(pipeline.clone())
.bind_descriptor_sets(
PipelineBindPoint::Compute,
@ -207,38 +203,37 @@ fn main() {
)
.dispatch([1024, 1, 1])
.unwrap();
// Finish building the command buffer by calling `build`.
let command_buffer = builder.build().unwrap();
// Let's execute this command buffer now.
// To do so, we TODO: this is a bit clumsy, probably needs a shortcut
let future = sync::now(device)
.then_execute(queue, command_buffer)
.unwrap()
// This line instructs the GPU to signal a *fence* once the command buffer has finished
// execution. A fence is a Vulkan object that allows the CPU to know when the GPU has
// reached a certain point.
// We need to signal a fence here because below we want to block the CPU until the GPU has
// reached that point in the execution.
// reached a certain point. We need to signal a fence here because below we want to block
// the CPU until the GPU has reached that point in the execution.
.then_signal_fence_and_flush()
.unwrap();
// Blocks execution until the GPU has finished the operation. This method only exists on the
// future that corresponds to a signalled fence. In other words, this method wouldn't be
// available if we didn't call `.then_signal_fence_and_flush()` earlier.
// The `None` parameter is an optional timeout.
// available if we didn't call `.then_signal_fence_and_flush()` earlier. The `None` parameter
// is an optional timeout.
//
// Note however that dropping the `future` variable (with `drop(future)` for example) would
// block execution as well, and this would be the case even if we didn't call
// `.then_signal_fence_and_flush()`.
// Therefore the actual point of calling `.then_signal_fence_and_flush()` and `.wait()` is to
// make things more explicit. In the future, if the Rust language gets linear types vulkano may
// get modified so that only fence-signalled futures can get destroyed like this.
// `.then_signal_fence_and_flush()`. Therefore the actual point of calling
// `.then_signal_fence_and_flush()` and `.wait()` is to make things more explicit. In the
// future, if the Rust language gets linear types vulkano may get modified so that only
// fence-signalled futures can get destroyed like this.
future.wait(None).unwrap();
// Now that the GPU is done, the content of the buffer should have been modified. Let's
// check it out.
// The call to `read()` would return an error if the buffer was still in use by the GPU.
// Now that the GPU is done, the content of the buffer should have been modified. Let's check
// it out. The call to `read()` would return an error if the buffer was still in use by the
// GPU.
let data_buffer_content = data_buffer.read().unwrap();
for n in 0..65536u32 {
assert_eq!(data_buffer_content[n as usize], n * 12);


@ -9,7 +9,6 @@
// Modified triangle example to show `SubbufferAllocator`.
use bytemuck::{Pod, Zeroable};
use std::{
sync::Arc,
time::{SystemTime, UNIX_EPOCH},
@ -17,7 +16,7 @@ use std::{
use vulkano::{
buffer::{
allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo},
BufferUsage,
BufferContents, BufferUsage,
},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
@ -60,7 +59,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -103,7 +101,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -156,8 +154,8 @@ fn main() {
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(Clone, Copy, BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -177,7 +175,7 @@ fn main() {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -185,14 +183,14 @@ fn main() {
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -200,7 +198,7 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
@ -277,7 +275,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -296,7 +294,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -387,7 +385,7 @@ fn main() {
previous_frame_end = Some(Box::new(sync::now(device.clone())) as Box<_>);
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(Box::new(sync::now(device.clone())) as Box<_>);
}
}
@ -397,7 +395,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,


@ -35,8 +35,8 @@ use winit::{
};
fn main() {
// The start of this example is exactly the same as `triangle`. You should read the
// `triangle` example if you haven't done so yet.
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
@ -44,7 +44,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -192,7 +191,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -209,7 +208,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -233,29 +232,28 @@ fn main() {
SubpassContents::Inline,
)
.unwrap()
// Clear attachments with clear values and rects information, all the rects will be cleared by the same value
// Note that the ClearRect offsets and extents are not affected by the viewport,
// they are directly applied to the rendering image
// Clear attachments with clear values and rects information. All the rects will be
// cleared by the same value. Note that the ClearRect offsets and extents are not
// affected by the viewport, they are directly applied to the rendering image.
.clear_attachments(
[ClearAttachment::Color {
color_attachment: 0,
clear_value: [1.0, 0.0, 0.0, 1.0].into(),
}],
[
// Fixed offset and extent
// Fixed offset and extent.
ClearRect {
offset: [0, 0],
extent: [100, 100],
array_layers: 0..1,
},
// Fixed offset
// Relative extent
// Fixed offset, relative extent.
ClearRect {
offset: [100, 150],
extent: [width / 4, height / 4],
array_layers: 0..1,
},
// Relative offset and extent
// Relative offset and extent.
ClearRect {
offset: [width / 2, height / 2],
extent: [width / 3, height / 5],
@ -289,7 +287,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -298,7 +296,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,


@ -37,7 +37,7 @@ fn main() {
// https://vulkan.lunarg.com/doc/view/1.0.13.0/windows/layers.html
//
// .. but if you just want a template of code that has everything ready to go then follow
// this example. First, enable debugging using this extension: VK_EXT_debug_utils
// this example. First, enable debugging using this extension: `VK_EXT_debug_utils`.
let extensions = InstanceExtensions {
ext_debug_utils: true,
..InstanceExtensions::empty()
@ -45,12 +45,12 @@ fn main() {
let library = VulkanLibrary::new().unwrap();
// You also need to specify (unless you've used the methods linked above) which debugging layers
// your code should use. Each layer is a bunch of checks or messages that provide information of
// some sort.
// You also need to specify (unless you've used the methods linked above) which debugging
// layers your code should use. Each layer is a bunch of checks or messages that provide
// information of some sort.
//
// The main layer you might want is: VK_LAYER_LUNARG_standard_validation
// This includes a number of the other layers for you and is quite detailed.
// The main layer you might want is `VK_LAYER_LUNARG_standard_validation`. This includes a
// number of the other layers for you and is quite detailed.
//
// Additional layers can be installed (gpu vendor provided, something you found on GitHub, etc)
// and you should verify that list for safety - Vulkano will return an error if you specify
@ -61,28 +61,26 @@ fn main() {
println!("\t{}", l.name());
}
// NOTE: To simplify the example code we won't verify these layer(s) are actually in the layers list:
// NOTE: To simplify the example code we won't verify these layer(s) are actually in the layers
// list.
let layers = vec!["VK_LAYER_KHRONOS_validation".to_owned()];
// Important: pass the extension(s) and layer(s) when creating the vulkano instance
// Important: pass the extension(s) and layer(s) when creating the vulkano instance.
let instance = Instance::new(
library,
InstanceCreateInfo {
enabled_extensions: extensions,
enabled_layers: layers,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
)
.expect("failed to create Vulkan instance");
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// After creating the instance we must register the debugging callback. //
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Note: If you let this debug_callback binding fall out of scope then the callback will stop providing events
// After creating the instance we must register the debug callback.
//
// NOTE: If you let this debug_callback binding fall out of scope then the callback will stop
// providing events.
let _debug_callback = unsafe {
DebugUtilsMessenger::new(
instance.clone(),
@ -130,10 +128,7 @@ fn main() {
.ok()
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Create Vulkan objects in the same way as the other examples //
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Create Vulkan objects in the same way as the other examples.
let device_extensions = DeviceExtensions {
..DeviceExtensions::empty()
};
@ -198,5 +193,6 @@ fn main() {
)
.unwrap();
// (At this point you should see a bunch of messages printed to the terminal window - have fun debugging!)
// (At this point you should see a bunch of messages printed to the terminal window -
// have fun debugging!)
}


@ -122,14 +122,13 @@ impl AmbientLightingSystem {
/// - `color_input` is an image containing the albedo of each object of the scene. It is the
/// result of the deferred pass.
/// - `ambient_color` is the color to apply.
///
pub fn draw(
&self,
viewport_dimensions: [u32; 2],
color_input: Arc<dyn ImageViewAbstract + 'static>,
ambient_color: [f32; 3],
) -> SecondaryAutoCommandBuffer {
let push_constants = fs::ty::PushConstants {
let push_constants = fs::PushConstants {
color: [ambient_color[0], ambient_color[1], ambient_color[2], 1.0],
};
@ -177,21 +176,22 @@ impl AmbientLightingSystem {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
// The `color_input` parameter of the `draw` method.
@ -209,6 +209,7 @@ void main() {
vec3 in_diffuse = subpassLoad(u_diffuse).rgb;
f_color.rgb = push_constants.color.rgb * in_diffuse;
f_color.a = 1.0;
}",
}
",
}
}


@ -132,7 +132,6 @@ impl DirectionalLightingSystem {
/// result of the deferred pass.
/// - `direction` is the direction of the light in world coordinates.
/// - `color` is the color to apply.
///
pub fn draw(
&self,
viewport_dimensions: [u32; 2],
@ -141,7 +140,7 @@ impl DirectionalLightingSystem {
direction: Vector3<f32>,
color: [f32; 3],
) -> SecondaryAutoCommandBuffer {
let push_constants = fs::ty::PushConstants {
let push_constants = fs::PushConstants {
color: [color[0], color[1], color[2], 1.0],
direction: direction.extend(0.0).into(),
};
@ -193,21 +192,22 @@ impl DirectionalLightingSystem {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
// The `color_input` parameter of the `draw` method.
@ -226,9 +226,11 @@ layout(location = 0) out vec4 f_color;
void main() {
vec3 in_normal = normalize(subpassLoad(u_normals).rgb);
// If the normal is perpendicular to the direction of the lighting, then `light_percent` will
// be 0. If the normal is parallel to the direction of the lightin, then `light_percent` will
// be 1. Any other angle will yield an intermediate value.
// If the normal is perpendicular to the direction of the lighting, then
// `light_percent` will be 0. If the normal is parallel to the direction of the
// lightin, then `light_percent` will be 1. Any other angle will yield an
// intermediate value.
float light_percent = -dot(push_constants.direction.xyz, in_normal);
// `light_percent` must not go below 0.0. There's no such thing as negative lighting.
light_percent = max(light_percent, 0.0);
@ -236,6 +238,7 @@ void main() {
vec3 in_diffuse = subpassLoad(u_diffuse).rgb;
f_color.rgb = light_percent * push_constants.color.rgb * in_diffuse;
f_color.a = 1.0;
}",
}
",
}
}


@ -12,8 +12,7 @@
// The main code is in the `system` module, while the other modules implement the different kinds
// of lighting sources.
use bytemuck::{Pod, Zeroable};
use vulkano::pipeline::graphics::vertex_input::Vertex;
use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};
pub use self::system::{DrawPass, Frame, FrameSystem, LightingPass, Pass};
@ -22,8 +21,8 @@ mod directional_lighting_system;
mod point_lighting_system;
mod system;
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct LightingVertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],


@ -138,7 +138,6 @@ impl PointLightingSystem {
/// coordinates of each pixel being processed.
/// - `position` is the position of the spot light in world coordinates.
/// - `color` is the color of the light.
///
#[allow(clippy::too_many_arguments)]
pub fn draw(
&self,
@ -150,7 +149,7 @@ impl PointLightingSystem {
position: Vector3<f32>,
color: [f32; 3],
) -> SecondaryAutoCommandBuffer {
let push_constants = fs::ty::PushConstants {
let push_constants = fs::PushConstants {
screen_to_world: screen_to_world.into(),
color: [color[0], color[1], color[2], 1.0],
position: position.extend(0.0).into(),
@ -204,7 +203,7 @@ impl PointLightingSystem {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -213,14 +212,15 @@ layout(location = 0) out vec2 v_screen_coords;
void main() {
v_screen_coords = position;
gl_Position = vec4(position, 0.0, 1.0);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
// The `color_input` parameter of the `draw` method.
@ -244,19 +244,22 @@ layout(location = 0) out vec4 f_color;
void main() {
float in_depth = subpassLoad(u_depth).x;
// Any depth superior or equal to 1.0 means that the pixel has been untouched by the deferred
// pass. We don't want to deal with them.
// Any depth superior or equal to 1.0 means that the pixel has been untouched by
// the deferred pass. We don't want to deal with them.
if (in_depth >= 1.0) {
discard;
}
// Find the world coordinates of the current pixel.
vec4 world = push_constants.screen_to_world * vec4(v_screen_coords, in_depth, 1.0);
world /= world.w;
vec3 in_normal = normalize(subpassLoad(u_normals).rgb);
vec3 light_direction = normalize(push_constants.position.xyz - world.xyz);
// Calculate the percent of lighting that is received based on the orientation of the normal
// and the direction of the light.
// Calculate the percent of lighting that is received based on the orientation of
// the normal and the direction of the light.
float light_percent = max(-dot(light_direction, in_normal), 0.0);
float light_distance = length(push_constants.position.xyz - world.xyz);
@ -266,6 +269,7 @@ void main() {
vec3 in_diffuse = subpassLoad(u_diffuse).rgb;
f_color.rgb = push_constants.color.rgb * light_percent * in_diffuse;
f_color.a = 1.0;
}",
}
",
}
}


@ -69,7 +69,6 @@ impl FrameSystem {
/// - `final_output_format` is the format of the image that will later be passed to the
/// `frame()` method. We need to know that in advance. If that format ever changes, we have
/// to create a new `FrameSystem`.
///
pub fn new(
gfx_queue: Arc<Queue>,
final_output_format: Format,
@ -151,8 +150,8 @@ impl FrameSystem {
)
.unwrap();
// For now we create three temporary images with a dimension of 1 by 1 pixel.
// These images will be replaced the first time we call `frame()`.
// For now we create three temporary images with a dimension of 1 by 1 pixel. These images
// will be replaced the first time we call `frame()`.
let diffuse_buffer = ImageView::new_default(
AttachmentImage::with_usage(
&memory_allocator,
@ -188,8 +187,8 @@ impl FrameSystem {
gfx_queue.device().clone(),
));
// Initialize the three lighting systems.
// Note that we need to pass to them the subpass where they will be executed.
// Initialize the three lighting systems. Note that we need to pass to them the subpass
// where they will be executed.
let lighting_subpass = Subpass::from(render_pass.clone(), 1).unwrap();
let ambient_lighting_system = AmbientLightingSystem::new(
gfx_queue.clone(),
@ -245,7 +244,6 @@ impl FrameSystem {
/// - `final_image` is the image we are going to draw to.
/// - `world_to_framebuffer` is the matrix that will be used to convert from 3D coordinates in
/// the world into 2D coordinates on the framebuffer.
///
pub fn frame<F>(
&mut self,
before_future: F,
@ -454,7 +452,6 @@ pub struct DrawPass<'f, 's: 'f> {
impl<'f, 's: 'f> DrawPass<'f, 's> {
/// Appends a command that executes a secondary command buffer that performs drawing.
#[inline]
pub fn execute<C>(&mut self, command_buffer: C)
where
C: SecondaryCommandBufferAbstract + 'static,
@ -468,14 +465,12 @@ impl<'f, 's: 'f> DrawPass<'f, 's> {
}
/// Returns the dimensions in pixels of the viewport.
#[inline]
pub fn viewport_dimensions(&self) -> [u32; 2] {
self.frame.framebuffer.extent()
}
/// Returns the 4x4 matrix that turns world coordinates into 2D coordinates on the framebuffer.
#[allow(dead_code)]
#[inline]
pub fn world_to_framebuffer_matrix(&self) -> Matrix4<f32> {
self.frame.world_to_framebuffer
}


@ -11,15 +11,15 @@
//
// The idea behind deferred lighting is to render the scene in two steps.
//
// First you draw all the objects of the scene. But instead of calculating the color they will
// have on the screen, you output their characteristics such as their diffuse color and their
// normals, and write this to images.
// First you draw all the objects of the scene. But instead of calculating the color they will have
// on the screen, you output their characteristics such as their diffuse color and their normals,
// and write this to images.
//
// After all the objects are drawn, you should obtain several images that contain the
// characteristics of each pixel.
//
// Then you apply lighting to the scene. In other words you draw to the final image by taking
// these intermediate images and the various lights of the scene as input.
// Then you apply lighting to the scene. In other words you draw to the final image by taking these
// intermediate images and the various lights of the scene as input.
//
// This technique allows you to apply tons of light sources to a scene, which would be too
// expensive otherwise. It has some drawbacks, which are the fact that transparent objects must be
@ -66,7 +66,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -109,7 +108,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -216,7 +215,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
let new_images = new_images
.into_iter()
@ -235,7 +234,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -285,7 +284,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}


@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder,
CommandBufferInheritanceInfo, CommandBufferUsage, SecondaryAutoCommandBuffer,
@ -120,8 +119,8 @@ impl TriangleDrawSystem {
}
}
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct TriangleVertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -130,21 +129,22 @@ struct TriangleVertex {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -153,6 +153,7 @@ layout(location = 1) out vec3 f_normal;
void main() {
f_color = vec4(1.0, 1.0, 1.0, 1.0);
f_normal = vec3(0.0, 0.0, 1.0);
}"
}
",
}
}


@ -9,10 +9,9 @@
// This example demonstrates how to use dynamic uniform buffers.
//
// Dynamic uniform and storage buffers store buffer data for different
// calls in one large buffer. Each draw or dispatch call can specify an
// offset into the buffer to read object data from, without having to
// rebind descriptor sets.
// Dynamic uniform and storage buffers store buffer data for different calls in one large buffer.
// Each draw or dispatch call can specify an offset into the buffer to read object data from,
// without having to rebind descriptor sets.
use std::{iter::repeat, mem::size_of};
use vulkano::{
@ -40,7 +39,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -74,7 +72,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -94,29 +92,29 @@ fn main() {
mod shader {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 12) in;
// Uniform Buffer Object
// Uniform buffer.
layout(set = 0, binding = 0) uniform InData {
uint data;
} ubo;
uint index;
} ub;
// Output Buffer
// Output buffer.
layout(set = 0, binding = 1) buffer OutData {
uint data[];
} data;
};
// Toy shader that only runs for the index specified in `ubo`.
// Toy shader that only runs for the index specified in `ub`.
void main() {
uint index = gl_GlobalInvocationID.x;
if(index == ubo.data) {
data.data[index] = index;
if (index == ub.index) {
data[index] = index;
}
}
"
",
}
}
@ -138,29 +136,31 @@ fn main() {
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
// Declare input buffer.
// Data in a dynamic buffer **MUST** be aligned to min_uniform_buffer_offset_align
// or min_storage_buffer_offset_align, depending on the type of buffer.
let data: Vec<u8> = vec![3, 11, 7];
// Create the input buffer. Data in a dynamic buffer **MUST** be aligned to
// `min_uniform_buffer_offset_align` or `min_storage_buffer_offset_align`, depending on the
// type of buffer.
let data: Vec<u32> = vec![3, 11, 7];
let min_dynamic_align = device
.physical_device()
.properties()
.min_uniform_buffer_offset_alignment as usize;
println!("Minimum uniform buffer offset alignment: {min_dynamic_align}");
println!("Input: {data:?}");
// Round size up to the next multiple of align.
let align = (size_of::<u32>() + min_dynamic_align - 1) & !(min_dynamic_align - 1);
let aligned_data = {
let mut aligned_data = Vec::with_capacity(align * data.len());
for elem in data {
let bytes = elem.to_ne_bytes();
// Fill up the buffer with data
for b in bytes {
aligned_data.push(b);
}
// Zero out any padding needed for alignment
// Fill up the buffer with data.
aligned_data.extend(bytes);
// Zero out any padding needed for alignment.
aligned_data.extend(repeat(0).take(align - bytes.len()));
}
aligned_data
};
@ -196,7 +196,7 @@ fn main() {
WriteDescriptorSet::buffer_with_range(
0,
input_buffer,
0..size_of::<shader::ty::InData>() as DeviceSize,
0..size_of::<shader::InData>() as DeviceSize,
),
WriteDescriptorSet::buffer(1, output_buffer.clone()),
],


@ -7,12 +7,11 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// This example demonstrates how to compute and load Compute Shader local size
// layout in runtime through specialization constants using Physical Device metadata.
// This example demonstrates how to define the compute shader local size layout at runtime through
// specialization constants while considering the physical device properties.
//
// Workgroup parallelism capabilities are varying between GPUs and setting them
// properly is important to achieve maximal performance that particular device
// can provide.
// Workgroup parallelism capabilities vary between GPUs and setting them properly is important to
// achieve the maximal performance that particular device can provide.
use std::{fs::File, io::BufWriter, path::Path};
use vulkano::{
@ -43,13 +42,11 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: InstanceExtensions {
// This extension is required to obtain physical device metadata
// about the device workgroup size limits
// This extension is required to obtain physical device metadata about the device
// workgroup size limits.
khr_get_physical_device_properties2: true,
..InstanceExtensions::empty()
},
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -82,7 +79,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -102,23 +99,20 @@ fn main() {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
// We set local_size_x and local_size_y to be variable configurable
// values through Specialization Constants. Values 1 and 2 define
// constant_id (1 and 2 correspondingly) and default values of
// the constants both. The `local_size_z = 1` here is an ordinary
// built-in value of the local size in Z axis.
// We set `local_size_x` and `local_size_y` to be variables configurable values
// through specialization constants. Values `1` and `2` both define a constant ID
// as well as a default value of 1 and 2 of the constants respecively. The
// `local_size_z = 1` here is an ordinary constant of the local size on the Z axis.
//
// Unfortunately current GLSL language capabilities doesn't let us
// define exact names of the constants so we will have to use
// anonymous constants instead. See below on how to provide their
// values in run time.
// Unfortunately current GLSL language capabilities doesn't let us define exact
// names of the constants so we will have to use anonymous constants instead. See
// below for how to provide their values at runtime.
//
// Please NOTE that the constant_id in local_size layout must be
// positive values. Zero value lead to runtime failure on nVidia
// devices due to a known bug in nVidia driver.
// NOTE: The constant ID in `local_size` layout must be positive values. Zeros lead
// to runtime failure on NVIDIA devices due to a known bug in the driver.
layout(local_size_x_id = 1, local_size_y_id = 2, local_size_z = 1) in;
// We can still define more constants in the Shader
@ -129,7 +123,7 @@ fn main() {
layout(set = 0, binding = 0, rgba8) uniform writeonly image2D img;
void main() {
// Colorful Mandelbrot fractal
// Colorful Mandelbrot fractal.
vec2 norm_coordinates = (gl_GlobalInvocationID.xy + vec2(0.5)) / vec2(imageSize(img));
vec2 c = (norm_coordinates - vec2(0.5)) * 2.0 - vec2(1.0, 0.0);
@ -151,30 +145,30 @@ fn main() {
imageStore(img, ivec2(gl_GlobalInvocationID.xy), to_write);
}
"
",
}
}
let shader = cs::load(device.clone()).unwrap();
// Fetching subgroup size from the Physical Device metadata to compute appropriate
// Compute Shader local size properties.
// Fetching subgroup size from the physical device properties to determine an appropriate
// compute shader local size.
//
// Most of the drivers provide this metadata, but some of the drivers don't.
// In this case we can find appropriate value in this table: https://vulkan.gpuinfo.org/
// or just use fallback constant for simplicity, but failure to set proper
// local size can lead to significant performance penalty.
// Most of the drivers provide this property, but some of the drivers don't. In that case we
// can find an appropriate value using this tool: https://vulkan.gpuinfo.org, or just use a
// fallback constant for simplicity, but failure to set a proper local size can lead to a
// significant performance penalty.
let (local_size_x, local_size_y) = match device.physical_device().properties().subgroup_size {
Some(subgroup_size) => {
println!("Subgroup size is {subgroup_size}");
// Most of the subgroup values are divisors of 8
// Most of the subgroup values are divisors of 8.
(8, subgroup_size / 8)
}
None => {
println!("This Vulkan driver doesn't provide physical device Subgroup information");
// Using fallback constant
// Using a fallback constant.
(8, 8)
}
};
@ -185,7 +179,8 @@ fn main() {
red: 0.2,
green: 0.5,
blue: 1.0,
constant_1: local_size_x, // specifying local size constants
// Specify the local size constants.
constant_1: local_size_x,
constant_2: local_size_y,
};
let pipeline = ComputePipeline::new(
@ -247,11 +242,8 @@ fn main() {
0,
set,
)
.dispatch([
1024 / local_size_x, // Note that dispatch dimensions must be
1024 / local_size_y, // proportional to local size
1,
])
// Note that dispatch dimensions must be proportional to the local size.
.dispatch([1024 / local_size_x, 1024 / local_size_y, 1])
.unwrap()
.copy_image_to_buffer(CopyImageToBufferInfo::image_buffer(image, buf.clone()))
.unwrap();


@ -8,14 +8,13 @@ fn main() {
// TODO: Can this be demonstrated for other platforms as well?
#[cfg(target_os = "linux")]
mod linux {
use bytemuck::{Pod, Zeroable};
use glium::glutin::{self, platform::unix::HeadlessContextExt};
use std::{
sync::{Arc, Barrier},
time::Instant,
};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder,
CommandBufferUsage, RenderPassBeginInfo, SemaphoreSubmitInfo, SubmitInfo,
@ -69,7 +68,7 @@ mod linux {
pub fn main() {
let event_loop = EventLoop::new();
// For some reason, this must be created before the vulkan window
// For some reason, this must be created before the Vulkan window.
let hrb = glutin::ContextBuilder::new()
.with_gl_debug_flag(true)
.with_gl(glutin::GlRequest::Latest)
@ -82,11 +81,13 @@ mod linux {
.build_surfaceless(&event_loop)
.unwrap();
// Used for checking device and driver UUIDs.
let display = glium::HeadlessRenderer::with_debug(
hrb_vk,
glium::debug::DebugCallbackBehavior::PrintAll,
)
.unwrap(); // Used for checking device and driver UUIDs
.unwrap();
let (
device,
_instance,
@ -293,7 +294,7 @@ mod linux {
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => {
return
}
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -312,7 +313,7 @@ mod linux {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -375,7 +376,7 @@ mod linux {
previous_frame_end = Some(vulkano::sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(vulkano::sync::now(device.clone()).boxed());
}
};
@ -386,8 +387,8 @@ mod linux {
});
}
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct MyVertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -427,7 +428,6 @@ mod linux {
}
.union(&required_extensions),
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
@ -444,7 +444,7 @@ mod linux {
msg.layer_prefix.unwrap_or("unknown"),
msg.ty,
msg.severity,
msg.description
msg.description,
);
})),
)
@ -695,28 +695,30 @@ mod linux {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
layout(location = 0) out vec2 tex_coords;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
tex_coords = position + vec2(0.5);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 tex_coords;
layout(location = 0) out vec4 f_color;
layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, tex_coords);
}"
}
",
}
}
}


@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::{io::Cursor, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, BlitImageInfo,
BufferImageCopy, ClearColorImageInfo, CommandBufferUsage, CopyBufferToImageInfo,
@ -57,8 +56,8 @@ use winit::{
};
fn main() {
// The start of this example is exactly the same as `triangle`. You should read the
// `triangle` example if you haven't done so yet.
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
@ -66,7 +65,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -161,8 +159,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -255,12 +253,12 @@ fn main() {
)
.unwrap();
// here, we perform image copying and blitting on the same image
// Here, we perform image copying and blitting on the same image.
uploads
// clear the image buffer
// Clear the image buffer.
.clear_color_image(ClearColorImageInfo::image(image.clone()))
.unwrap()
// put our image in the top left corner
// Put our image in the top left corner.
.copy_buffer_to_image(CopyBufferToImageInfo {
regions: [BufferImageCopy {
image_subresource: image.subresource_layers(),
@ -271,7 +269,7 @@ fn main() {
..CopyBufferToImageInfo::buffer_image(buffer, image.clone())
})
.unwrap()
// copy from the top left corner to the bottom right corner
// Copy from the top left corner to the bottom right corner.
.copy_image(CopyImageInfo {
// Copying within the same image requires the General layout if the source and
// destination subresources overlap.
@ -289,9 +287,9 @@ fn main() {
..CopyImageInfo::images(image.clone(), image.clone())
})
.unwrap()
// blit from the bottom right corner to the top right corner (flipped)
// Blit from the bottom right corner to the top right corner (flipped).
.blit_image(BlitImageInfo {
// Same for blitting.
// Same as above applies for blitting.
src_image_layout: ImageLayout::General,
dst_image_layout: ImageLayout::General,
regions: [ImageBlit {
@ -301,7 +299,7 @@ fn main() {
[img_size[0] * 2, img_size[1] * 2, 1],
],
dst_subresource: image.subresource_layers(),
// swapping the two corners results in flipped image
// Swapping the two corners results in flipped image.
dst_offsets: [
[img_size[0] * 2 - 1, img_size[1] - 1, 0],
[img_size[0], 0, 1],
@ -394,7 +392,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -410,7 +408,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -470,7 +468,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -479,7 +477,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,
@ -507,7 +505,7 @@ fn window_size_dependent_setup(
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -516,14 +514,15 @@ layout(location = 0) out vec2 tex_coords;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
tex_coords = position + vec2(0.5);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 tex_coords;
@ -533,6 +532,7 @@ layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, tex_coords);
}"
}
",
}
}


@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::{io::Cursor, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -55,8 +54,8 @@ use winit::{
};
fn main() {
// The start of this example is exactly the same as `triangle`. You should read the
// `triangle` example if you haven't done so yet.
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
@ -64,7 +63,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -159,8 +157,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -323,7 +321,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -339,7 +337,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -399,7 +397,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -408,7 +406,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,
@ -436,7 +434,7 @@ fn window_size_dependent_setup(
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -445,14 +443,15 @@ layout(location = 0) out vec2 tex_coords;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
tex_coords = position + vec2(0.5);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 tex_coords;
@ -462,6 +461,7 @@ layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, tex_coords);
}"
}
",
}
}


@ -7,19 +7,18 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// An immutable sampler is a sampler that is integrated into the descriptor set layout
// (and thus pipeline layout), instead of being written to an individual descriptor set.
// Consequently, all descriptor sets with this layout will share the same sampler.
// An immutable sampler is a sampler that is integrated into the descriptor set layout (and thus
// pipeline layout), instead of being written to an individual descriptor set. Consequently, all
// descriptor sets with this layout will share the same sampler.
//
// This example is almost identical to the image example, but with two differences, which have
// been commented:
// - The sampler is added to the descriptor set layout at pipeline creation.
// - No sampler is included when building a descriptor set.
use bytemuck::{Pod, Zeroable};
use std::{io::Cursor, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -70,7 +69,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -165,8 +163,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -273,8 +271,7 @@ fn main() {
.color_blend_state(ColorBlendState::new(subpass.num_color_attachments()).blend_alpha())
.render_pass(subpass)
.with_auto_layout(device.clone(), |layout_create_infos| {
// Modify the auto-generated layout by setting an immutable sampler to
// set 0 binding 0.
// Modify the auto-generated layout by setting an immutable sampler to set 0 binding 0.
let binding = layout_create_infos[0].bindings.get_mut(&0).unwrap();
binding.immutable_samplers = vec![sampler];
})
@ -282,7 +279,8 @@ fn main() {
let layout = pipeline.layout().set_layouts().get(0).unwrap();
// Use `image_view` instead of `image_view_sampler`, since the sampler is already in the layout.
// Use `image_view` instead of `image_view_sampler`, since the sampler is already in the
// layout.
let set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
layout.clone(),
@ -336,7 +334,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -352,7 +350,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -412,7 +410,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -421,7 +419,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,
@ -449,7 +447,7 @@ fn window_size_dependent_setup(
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -458,14 +456,15 @@ layout(location = 0) out vec2 tex_coords;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
tex_coords = position + vec2(0.5);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 tex_coords;
@ -475,6 +474,7 @@ layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, tex_coords);
}"
}
",
}
}


@ -12,24 +12,22 @@
// Indirect draw calls allow us to issue a draw without needing to know the number of vertices
// until later when the draw is executed by the GPU.
//
// This is used in situations where vertices are being generated on the GPU, such as a GPU
// particle simulation, and the exact number of output vertices cannot be known until
// the compute shader has run.
// This is used in situations where vertices are being generated on the GPU, such as a GPU particle
// simulation, and the exact number of output vertices cannot be known until the compute shader has
// run.
//
// In this example the compute shader is trivial and the number of vertices does not change.
// However is does demonstrate that each compute instance atomically updates the vertex
// counter before filling the vertex buffer.
// However is does demonstrate that each compute instance atomically updates the vertex counter
// before filling the vertex buffer.
//
// For an explanation of how the rendering of the triangles takes place see the `triangle.rs`
// example.
//
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{
allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo},
BufferUsage,
BufferContents, BufferUsage,
},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
@ -76,7 +74,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -174,7 +171,7 @@ fn main() {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
// The triangle vertex positions.
@ -183,14 +180,14 @@ fn main() {
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r#"
#version 450
layout(location = 0) out vec4 f_color;
@ -198,16 +195,17 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
"#,
}
}
// A simple compute shader that generates vertices. It has two buffers bound: the first is where we output the vertices, the second
// is the IndirectDrawArgs struct we passed the draw_indirect so we can set the number to vertices to draw
// A simple compute shader that generates vertices. It has two buffers bound: the first is
// where we output the vertices, the second is the `IndirectDrawArgs` struct we passed to
// `draw_indirect` so we can set the number of vertices to draw.
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 16, local_size_y = 1, local_size_z = 1) in;
@ -226,9 +224,10 @@ fn main() {
void main() {
uint idx = gl_GlobalInvocationID.x;
// each thread of compute shader is going to increment the counter, so we need to use atomic
// operations for safety. The previous value of the counter is returned so that gives us
// the offset into the vertex buffer this thread can write it's vertices into.
// Each invocation of the compute shader is going to increment the counter, so
// we need to use atomic operations for safety. The previous value of the
// counter is returned, which gives us the offset into the vertex buffer this
// thread can write its vertices into.
uint offset = atomicAdd(vertices, 6);
vec2 center = vec2(-0.8, -0.8) + idx * vec2(0.1, 0.1);
@ -239,7 +238,7 @@ fn main() {
triangles.pos[offset + 4] = center + vec2(0.025, 0.01725);
triangles.pos[offset + 5] = center + vec2(-0.025, 0.01725);
}
"
",
}
}
@ -292,10 +291,10 @@ fn main() {
)
.unwrap();
// # Vertex Types
// `Vertex` is the vertex type that will be output from the compute shader and be input to the vertex shader.
// `Vertex` is the vertex type that will be output from the compute shader and be input to the
// vertex shader.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -355,7 +354,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -374,15 +373,16 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
recreate_swapchain = true;
}
// Allocate a GPU buffer to hold the arguments for this frames draw call. The compute
// shader will only update vertex_count, so set the other parameters correctly here.
// Allocate a buffer to hold the arguments for this frame's draw call. The compute
// shader will only update `vertex_count`, so set the other parameters correctly
// here.
let indirect_commands = [DrawIndirectCommand {
vertex_count: 0,
instance_count: 1,
@ -397,15 +397,15 @@ fn main() {
.unwrap()
.copy_from_slice(&indirect_commands);
// Allocate a GPU buffer to hold this frames vertices. This needs to be large enough to hold
// the worst case number of vertices generated by the compute shader
// Allocate a buffer to hold this frame's vertices. This needs to be large enough
// to hold the worst case number of vertices generated by the compute shader.
let iter = (0..(6 * 16)).map(|_| Vertex { position: [0.0; 2] });
let vertices = vertex_pool.allocate_slice(iter.len() as _).unwrap();
for (o, i) in vertices.write().unwrap().iter_mut().zip(iter) {
*o = i;
}
// Pass the two buffers to the compute shader
// Pass the two buffers to the compute shader.
let layout = compute_pipeline.layout().set_layouts().get(0).unwrap();
let cs_desciptor_set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
@ -424,8 +424,8 @@ fn main() {
)
.unwrap();
// First in the command buffer we dispatch the compute shader to generate the vertices and fill out the draw
// call arguments
// First in the command buffer we dispatch the compute shader to generate the
// vertices and fill out the draw call arguments.
builder
.bind_pipeline_compute(compute_pipeline.clone())
.bind_descriptor_sets(
@ -446,11 +446,11 @@ fn main() {
SubpassContents::Inline,
)
.unwrap()
// The indirect draw call is placed in the command buffer with a reference to the GPU buffer that will
// contain the arguments when the draw is executed on the GPU
.set_viewport(0, [viewport.clone()])
.bind_pipeline_graphics(render_pipeline.clone())
.bind_vertex_buffers(0, vertices)
// The indirect draw call is placed in the command buffer with a reference to
// the buffer that will contain the arguments for the draw.
.draw_indirect(indirect_buffer)
.unwrap()
.end_render_pass()
@ -478,7 +478,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -488,7 +488,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,

View File

@ -12,10 +12,9 @@
// This is a simple, modified version of the `triangle.rs` example that demonstrates how we can use
// the "instancing" technique with vulkano to draw many instances of the triangle.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderPassBeginInfo, SubpassContents,
@ -51,22 +50,17 @@ use winit::{
window::{Window, WindowBuilder},
};
// # Vertex Types
//
// Seeing as we are going to use the `OneVertexOneInstanceDefinition` vertex definition for our
// graphics pipeline, we need to define two vertex types:
//
// 1. `TriangleVertex` is the vertex type that we will use to describe the triangle's geometry.
/// The vertex type that we will use to describe the triangle's geometry.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct TriangleVertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
}
// 2. `InstanceData` is the vertex type that describes the unique data per instance.
/// The vertex type that describes the unique data per instance.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct InstanceData {
#[format(R32G32_SFLOAT)]
position_offset: [f32; 2],
@ -81,7 +75,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -177,8 +170,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
// We now create a buffer that will store the shape of our triangle.
// This triangle is identical to the one in the `triangle.rs` example.
// We now create a buffer that will store the shape of our triangle. This triangle is identical
// to the one in the `triangle.rs` example.
let vertices = [
TriangleVertex {
position: [-0.5, -0.25],
@ -202,8 +195,8 @@ fn main() {
.unwrap()
};
// Now we create another buffer that will store the unique data per instance.
// For this example, we'll have the instances form a 10x10 grid that slowly gets larger.
// Now we create another buffer that will store the unique data per instance. For this example,
// we'll have the instances form a 10x10 grid that slowly gets larger.
let instances = {
let rows = 10;
let cols = 10;
@ -238,7 +231,7 @@ fn main() {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
// The triangle vertex positions.
@ -252,14 +245,14 @@ fn main() {
// Apply the scale and offset for the instance.
gl_Position = vec4(position * scale + position_offset, 0.0, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -267,7 +260,7 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
@ -292,8 +285,8 @@ fn main() {
.unwrap();
let pipeline = GraphicsPipeline::start()
// Use the `BuffersDefinition` to describe to vulkano how the two vertex types
// are expected to be used.
// Use the implementations of the `Vertex` trait to describe to vulkano how the two vertex
// types are expected to be used.
.vertex_input_state([TriangleVertex::per_vertex(), InstanceData::per_instance()])
.vertex_shader(vs.entry_point("main").unwrap(), ())
.input_assembly_state(InputAssemblyState::new())
@ -346,7 +339,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -365,7 +358,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -425,7 +418,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -435,7 +428,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,

View File

@ -7,49 +7,52 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::fractal_compute_pipeline::FractalComputePipeline;
use crate::place_over_frame::RenderPassPlaceOverFrame;
use crate::{
fractal_compute_pipeline::FractalComputePipeline, place_over_frame::RenderPassPlaceOverFrame,
};
use cgmath::Vector2;
use std::sync::Arc;
use std::time::Instant;
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
use vulkano::device::Queue;
use vulkano::memory::allocator::StandardMemoryAllocator;
use vulkano::sync::GpuFuture;
use vulkano_util::renderer::{DeviceImageView, VulkanoWindowRenderer};
use vulkano_util::window::WindowDescriptor;
use winit::window::Fullscreen;
use std::{sync::Arc, time::Instant};
use vulkano::{
command_buffer::allocator::StandardCommandBufferAllocator,
descriptor_set::allocator::StandardDescriptorSetAllocator, device::Queue,
memory::allocator::StandardMemoryAllocator, sync::GpuFuture,
};
use vulkano_util::{
renderer::{DeviceImageView, VulkanoWindowRenderer},
window::WindowDescriptor,
};
use winit::{
dpi::PhysicalPosition,
event::{
ElementState, Event, KeyboardInput, MouseButton, MouseScrollDelta, VirtualKeyCode,
WindowEvent,
},
window::Fullscreen,
};
const MAX_ITERS_INIT: u32 = 200;
const MOVE_SPEED: f32 = 0.5;
/// App for exploring Julia and Mandelbrot fractals
/// App for exploring Julia and Mandelbrot fractals.
pub struct FractalApp {
/// Pipeline that computes Mandelbrot & Julia fractals and writes them to an image
/// Pipeline that computes Mandelbrot & Julia fractals and writes them to an image.
fractal_pipeline: FractalComputePipeline,
/// Our render pipeline (pass)
/// Our render pipeline (pass).
pub place_over_frame: RenderPassPlaceOverFrame,
/// Toggle that flips between julia and mandelbrot
/// Toggle that flips between Julia and Mandelbrot.
pub is_julia: bool,
/// Togglet thats stops the movement on Julia
/// Toggle that stops the movement on Julia.
is_c_paused: bool,
/// C is a constant input to Julia escape time algorithm (mouse position).
c: Vector2<f32>,
/// Our zoom level
/// Our zoom level.
scale: Vector2<f32>,
/// Our translation on the complex plane
/// Our translation on the complex plane.
translation: Vector2<f32>,
/// How far should the escape time algorithm run (higher = less performance, more accurate image)
/// How long the escape time algorithm should run (higher = less performance, more accurate
/// image).
pub max_iters: u32,
/// Time tracking, useful for frame independent movement
/// Time tracking, useful for frame independent movement.
time: Instant,
dt: f32,
dt_sum: f32,
@ -113,11 +116,11 @@ Usage:
F: Toggle full-screen
Right mouse: Stop movement in Julia (mouse position determines c)
Esc: Quit\
"
",
);
}
/// Run our compute pipeline and return a future of when the compute is finished
/// Runs our compute pipeline and returns a future of when the compute is finished.
pub fn compute(&self, image_target: DeviceImageView) -> Box<dyn GpuFuture> {
self.fractal_pipeline.compute(
image_target,
@ -129,24 +132,24 @@ Usage:
)
}
/// Should the app quit? (on esc)
/// Returns whether the app should quit. (Happens when pressing ESC.)
pub fn is_running(&self) -> bool {
!self.input_state.should_quit
}
/// Return average fps
/// Returns the average FPS.
pub fn avg_fps(&self) -> f32 {
self.avg_fps
}
/// Delta time in milliseconds
/// Returns the delta time in milliseconds.
pub fn dt(&self) -> f32 {
self.dt * 1000.0
}
/// Update times and dt at the end of each frame
/// Updates times and dt at the end of each frame.
pub fn update_time(&mut self) {
// Each second, update average fps & reset frame count & dt sum
// Each second, update average fps & reset frame count & dt sum.
if self.dt_sum > 1.0 {
self.avg_fps = self.frame_count / self.dt_sum;
self.frame_count = 0.0;
@ -158,17 +161,19 @@ Usage:
self.time = Instant::now();
}
/// Updates app state based on input state
/// Updates app state based on input state.
pub fn update_state_after_inputs(&mut self, renderer: &mut VulkanoWindowRenderer) {
// Zoom in or out
// Zoom in or out.
if self.input_state.scroll_delta > 0. {
self.scale /= 1.05;
} else if self.input_state.scroll_delta < 0. {
self.scale *= 1.05;
}
// Move speed scaled by zoom level
// Move speed scaled by zoom level.
let move_speed = MOVE_SPEED * self.dt * self.scale.x;
// Panning
// Panning.
if self.input_state.pan_up {
self.translation += Vector2::new(0.0, move_speed);
}
@ -181,22 +186,27 @@ Usage:
if self.input_state.pan_left {
self.translation += Vector2::new(-move_speed, 0.0);
}
// Toggle between julia and mandelbrot
// Toggle between Julia and Mandelbrot.
if self.input_state.toggle_julia {
self.is_julia = !self.is_julia;
}
// Toggle c
// Toggle c.
if self.input_state.toggle_c {
self.is_c_paused = !self.is_c_paused;
}
// Update c
// Update c.
if !self.is_c_paused {
// Scale normalized mouse pos between -1.0 and 1.0;
// Scale normalized mouse pos between -1.0 and 1.0.
let mouse_pos = self.input_state.normalized_mouse_pos() * 2.0 - Vector2::new(1.0, 1.0);
// Scale by our zoom (scale) level so when zooming in the movement on julia is not so drastic
// Scale by our zoom (scale) level so when zooming in the movement on Julia is not so
// drastic.
self.c = mouse_pos * self.scale.x;
}
// Update how many iterations we have
// Update how many iterations we have.
if self.input_state.increase_iterations {
self.max_iters += 1;
}
@ -207,11 +217,13 @@ Usage:
self.max_iters -= 1;
}
}
// Randomize our palette
// Randomize our palette.
if self.input_state.randomize_palette {
self.fractal_pipeline.randomize_palette();
}
// Toggle full-screen
// Toggle full-screen.
if self.input_state.toggle_full_screen {
let is_full_screen = renderer.window().fullscreen().is_some();
renderer.window().set_fullscreen(if !is_full_screen {
@ -222,12 +234,12 @@ Usage:
}
}
/// Update input state
/// Updates the input state.
pub fn handle_input(&mut self, window_size: [f32; 2], event: &Event<()>) {
self.input_state.handle_input(window_size, event);
}
/// reset input state at the end of frame
/// Resets the input state at the end of the frame.
pub fn reset_input_state(&mut self) {
self.input_state.reset()
}
@ -240,9 +252,9 @@ fn state_is_pressed(state: ElementState) -> bool {
}
}
/// Just a very simple input state (mappings).
/// Winit only has Pressed and Released events, thus continuous movement needs toggles.
/// Panning is one of those where continuous movement feels better.
/// Just a very simple input state (mappings). Winit only has `Pressed` and `Released` events, thus
/// continuous movement needs toggles. Panning is one of those things where continuous movement
/// feels better.
struct InputState {
pub window_size: [f32; 2],
pub pan_up: bool,
@ -290,7 +302,7 @@ impl InputState {
)
}
// Resets values that should be reset. All incremental mappings and toggles should be reset.
/// Resets values that should be reset. All incremental mappings and toggles should be reset.
fn reset(&mut self) {
*self = InputState {
scroll_delta: 0.0,
@ -319,7 +331,7 @@ impl InputState {
}
}
/// Match keyboard event to our defined inputs
/// Matches keyboard events to our defined inputs.
fn on_keyboard_event(&mut self, input: &KeyboardInput) {
if let Some(key_code) = input.virtual_keycode {
match key_code {
@ -338,7 +350,7 @@ impl InputState {
}
}
/// Update mouse scroll delta
/// Updates mouse scroll delta.
fn on_mouse_wheel_event(&mut self, delta: &MouseScrollDelta) {
let change = match delta {
MouseScrollDelta::LineDelta(_x, y) => *y,

View File

@ -45,7 +45,7 @@ impl FractalComputePipeline {
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
) -> FractalComputePipeline {
// Initial colors
// Initial colors.
let colors = vec![
[1.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 1.0],
@ -90,7 +90,7 @@ impl FractalComputePipeline {
}
}
/// Randomizes our color palette
/// Randomizes our color palette.
pub fn randomize_palette(&mut self) {
let mut colors = vec![];
for _ in 0..self.palette_size {
@ -120,7 +120,7 @@ impl FractalComputePipeline {
max_iters: u32,
is_julia: bool,
) -> Box<dyn GpuFuture> {
// Resize image if needed
// Resize image if needed.
let img_dims = image.image().dimensions().width_height();
let pipeline_layout = self.pipeline.layout();
let desc_layout = pipeline_layout.set_layouts().get(0).unwrap();
@ -140,15 +140,14 @@ impl FractalComputePipeline {
)
.unwrap();
let push_constants = cs::ty::PushConstants {
let push_constants = cs::PushConstants {
end_color: self.end_color,
c: c.into(),
scale: scale.into(),
translation: translation.into(),
end_color: self.end_color,
palette_size: self.palette_size,
max_iters: max_iters as i32,
is_julia: is_julia as u32,
_dummy0: [0u8; 8], // Required for alignment
};
builder
.bind_pipeline_compute(self.pipeline.clone())
@ -165,7 +164,7 @@ impl FractalComputePipeline {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
@ -180,17 +179,18 @@ layout(set = 0, binding = 1) buffer Palette {
// Our variable inputs as push constants
layout(push_constant) uniform PushConstants {
vec4 end_color;
vec2 c;
vec2 scale;
vec2 translation;
vec4 end_color;
int palette_size;
int max_iters;
bool is_julia;
} push_constants;
// Gets smooth color between current color (determined by iterations) and the next color in the palette
// by linearly interpolating the colors based on: https://linas.org/art-gallery/escape/smooth.html
// Gets smooth color between current color (determined by iterations) and the next
// color in the palette by linearly interpolating the colors based on:
// https://linas.org/art-gallery/escape/smooth.html
vec4 get_color(
int palette_size,
vec4 end_color,
@ -218,10 +218,11 @@ void main() {
float x0 = ar * (push_constants.translation.x + (x_over_width - 0.5) * push_constants.scale.x);
float y0 = push_constants.translation.y + (y_over_height - 0.5) * push_constants.scale.y;
// Julia is like mandelbrot, but instead changing the constant `c` will change the shape
// you'll see. Thus we want to bind the c to mouse position.
// Julia is like Mandelbrot, but instead, changing the constant `c` will change the
// shape you'll see. Thus we want to bind `c` to the mouse position.
// With mandelbrot, c = scaled xy position of the image. Z starts from zero.
// With julia, c = any value between the interesting range (-2.0 - 2.0), Z = scaled xy position of the image.
// With Julia, c = any value within the interesting range (-2.0 - 2.0),
// Z = scaled xy position of the image.
vec2 c;
vec2 z;
if (push_constants.is_julia) {
@ -234,8 +235,8 @@ void main() {
// Escape time algorithm:
// https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set
// It's an iterative algorithm where the bailout point (number of iterations) will determine
// the color we choose from the palette
// It's an iterative algorithm where the bailout point (number of iterations) will
// determine the color we choose from the palette.
int i;
float len_z;
for (i = 0; i < push_constants.max_iters; i += 1) {
@ -246,7 +247,7 @@ void main() {
len_z = length(z);
// Using 8.0 for the bailout limit gives a little nicer colors with smooth coloring.
// 2.0 is enough to 'determine' an escape will happen
// 2.0 is enough to 'determine' an escape will happen.
if (len_z > 8.0) {
break;
}
@ -260,6 +261,7 @@ void main() {
len_z
);
imageStore(img, ivec2(gl_GlobalInvocationID.xy), write_color);
}",
}
",
}
}
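As background for the shader above: the escape-time loop iterates the usual quadratic map, and the smooth coloring referenced from linas.org is typically derived from the continuous escape count (a summary of the linked article, not code from this commit):

    z_{n+1} = z_n^2 + c, \qquad \text{bail out once } |z_n| > R \quad (R = 8.0 \text{ in this shader})

    \mu = n + 1 - \frac{\log \log |z_n|}{\log 2}

where n is the iteration at which the bailout was hit, and \mu is then used to interpolate between neighbouring palette entries.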

View File

@ -7,13 +7,25 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// This is an example demonstrating an application with some more non-trivial functionality.
// It should get you more up to speed with how you can use Vulkano.
//
// It contains:
//
// - A compute pipeline to calculate Mandelbrot and Julia fractals, writing them to an image.
// - A graphics pipeline to draw the fractal image over a quad that covers the whole screen.
// - A renderpass rendering that image on the swapchain image.
// - An organized renderer with functionality good enough to copy to other projects.
// - A simple `FractalApp` to handle runtime state.
// - A simple `InputState` to interact with the application.
use crate::app::FractalApp;
use vulkano::image::ImageUsage;
use vulkano::swapchain::PresentMode;
use vulkano::sync::GpuFuture;
use vulkano_util::context::{VulkanoConfig, VulkanoContext};
use vulkano_util::renderer::{VulkanoWindowRenderer, DEFAULT_IMAGE_FORMAT};
use vulkano_util::window::{VulkanoWindows, WindowDescriptor};
use vulkano::{image::ImageUsage, swapchain::PresentMode, sync::GpuFuture};
use vulkano_util::{
context::{VulkanoConfig, VulkanoContext},
renderer::{VulkanoWindowRenderer, DEFAULT_IMAGE_FORMAT},
window::{VulkanoWindows, WindowDescriptor},
};
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
@ -25,17 +37,8 @@ mod fractal_compute_pipeline;
mod pixels_draw_pipeline;
mod place_over_frame;
/// This is an example demonstrating an application with some more non-trivial functionality.
/// It should get you more up to speed with how you can use Vulkano.
/// It contains
/// - Compute pipeline to calculate Mandelbrot and Julia fractals writing them to an image target
/// - Graphics pipeline to draw the fractal image over a quad that covers the whole screen
/// - Renderpass rendering that image over swapchain image
/// - An organized Renderer with functionality good enough to copy to other projects
/// - Simple FractalApp to handle runtime state
/// - Simple Input system to interact with the application
fn main() {
// Create event loop
// Create the event loop.
let mut event_loop = EventLoop::new();
let context = VulkanoContext::new(VulkanoConfig::default());
let mut windows = VulkanoWindows::default();
@ -53,6 +56,7 @@ fn main() {
// Add our render target image onto which we'll be rendering our fractals.
let render_target_id = 0;
let primary_window_renderer = windows.get_primary_renderer_mut().unwrap();
// Make sure the image usage is correct (based on your pipeline).
primary_window_renderer.add_additional_image_view(
render_target_id,
@ -60,16 +64,18 @@ fn main() {
ImageUsage::SAMPLED | ImageUsage::STORAGE | ImageUsage::TRANSFER_DST,
);
// Create app to hold the logic of our fractal explorer
// Create app to hold the logic of our fractal explorer.
let gfx_queue = context.graphics_queue();
// We intend to eventually render on our swapchain, thus we use that format when creating the app here.
// We intend to eventually render on our swapchain, thus we use that format when creating the
// app here.
let mut app = FractalApp::new(
gfx_queue.clone(),
primary_window_renderer.swapchain_format(),
);
app.print_guide();
// Basic loop for our runtime
// Basic loop for our runtime:
// 1. Handle events
// 2. Update state based on events
// 3. Compute & Render
@ -82,7 +88,7 @@ fn main() {
match primary_window_renderer.window_size() {
[w, h] => {
// Skip this frame when minimized
// Skip this frame when minimized.
if w == 0.0 || h == 0.0 {
continue;
}
@ -103,15 +109,17 @@ fn main() {
}
}
/// Handle events and return `bool` if we should quit
/// Handles events and returns a `bool` indicating if we should quit.
fn handle_events(
event_loop: &mut EventLoop<()>,
renderer: &mut VulkanoWindowRenderer,
app: &mut FractalApp,
) -> bool {
let mut is_running = true;
event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match &event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => is_running = false,
@ -123,19 +131,21 @@ fn handle_events(
Event::MainEventsCleared => *control_flow = ControlFlow::Exit,
_ => (),
}
// Pass event for app to handle our inputs
// Pass event for the app to handle our inputs.
app.handle_input(renderer.window_size(), &event);
});
is_running && app.is_running()
}
/// Orchestrate rendering here
/// Orchestrates rendering.
fn compute_then_render(
renderer: &mut VulkanoWindowRenderer,
app: &mut FractalApp,
target_image_id: usize,
) {
// Start frame
// Start the frame.
let before_pipeline_future = match renderer.acquire() {
Err(e) => {
println!("{e}");
@ -143,15 +153,19 @@ fn compute_then_render(
}
Ok(future) => future,
};
// Retrieve target image
// Retrieve the target image.
let image = renderer.get_additional_image_view(target_image_id);
// Compute our fractal (writes to target image). Join future with `before_pipeline_future`.
let after_compute = app.compute(image.clone()).join(before_pipeline_future);
// Render image over frame. Input previous future. Draw on swapchain image
// Render the image over the swapchain image, inputting the previous future.
let after_renderpass_future =
app.place_over_frame
.render(after_compute, image, renderer.swapchain_image_view());
// Finish frame (which presents the view). Input last future. Wait for the future so resources are not in use
// when we render
// Finish the frame (which presents the view), inputting the last future. Wait for the future
// so resources are not in use when we render.
renderer.present(after_renderpass_future, true);
}

View File

@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder,
CommandBufferInheritanceInfo, CommandBufferUsage, SecondaryAutoCommandBuffer,
@ -33,9 +32,9 @@ use vulkano::{
sampler::{Filter, Sampler, SamplerAddressMode, SamplerCreateInfo, SamplerMipmapMode},
};
/// Vertex for textured quads
/// Vertex for textured quads.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
pub struct TexturedVertex {
#[format(R32G32_SFLOAT)]
pub position: [f32; 2],
@ -67,7 +66,7 @@ pub fn textured_quad(width: f32, height: f32) -> (Vec<TexturedVertex>, Vec<u32>)
)
}
/// A subpass pipeline that fills a quad over frame
/// A subpass pipeline that fills a quad over the frame.
pub struct PixelsDrawPipeline {
gfx_queue: Arc<Queue>,
subpass: Subpass,
@ -160,7 +159,7 @@ impl PixelsDrawPipeline {
.unwrap()
}
/// Draw input `image` over a quad of size -1.0 to 1.0
/// Draws input `image` over a quad of size -1.0 to 1.0.
pub fn draw(
&self,
viewport_dimensions: [u32; 2],
@ -204,7 +203,7 @@ impl PixelsDrawPipeline {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location=0) in vec2 position;
layout(location=1) in vec2 tex_coords;
@ -215,14 +214,14 @@ void main() {
gl_Position = vec4(position, 0.0, 1.0);
f_tex_coords = tex_coords;
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 v_tex_coords;
@ -233,6 +232,6 @@ layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, v_tex_coords);
}
"
",
}
}

View File

@ -24,7 +24,7 @@ use vulkano::{
};
use vulkano_util::renderer::{DeviceImageView, SwapchainImageView};
/// A render pass which places an incoming image over frame filling it
/// A render pass which places an incoming image over the frame, filling it.
pub struct RenderPassPlaceOverFrame {
gfx_queue: Arc<Queue>,
render_pass: Arc<RenderPass>,
@ -72,8 +72,8 @@ impl RenderPassPlaceOverFrame {
}
}
/// Place view exactly over swapchain image target.
/// Texture draw pipeline uses a quad onto which it places the view.
/// Places the view exactly over the target swapchain image. The texture draw pipeline uses a
/// quad onto which it places the view.
pub fn render<F>(
&self,
before_future: F,
@ -83,9 +83,10 @@ impl RenderPassPlaceOverFrame {
where
F: GpuFuture + 'static,
{
// Get dimensions
// Get dimensions.
let img_dims = target.image().dimensions();
// Create framebuffer (must be in same order as render pass description in `new`
// Create the framebuffer (must be in the same order as the render pass description in `new`).
let framebuffer = Framebuffer::new(
self.render_pass.clone(),
FramebufferCreateInfo {
@ -94,14 +95,16 @@ impl RenderPassPlaceOverFrame {
},
)
.unwrap();
// Create primary command buffer builder
// Create primary command buffer builder.
let mut command_buffer_builder = AutoCommandBufferBuilder::primary(
&self.command_buffer_allocator,
self.gfx_queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
// Begin render pass
// Begin render pass.
command_buffer_builder
.begin_render_pass(
RenderPassBeginInfo {
@ -111,17 +114,22 @@ impl RenderPassPlaceOverFrame {
SubpassContents::SecondaryCommandBuffers,
)
.unwrap();
// Create secondary command buffer from texture pipeline & send draw commands
// Create secondary command buffer from texture pipeline & send draw commands.
let cb = self
.pixels_draw_pipeline
.draw(img_dims.width_height(), view);
// Execute above commands (subpass)
// Execute above commands (subpass).
command_buffer_builder.execute_commands(cb).unwrap();
// End render pass
// End render pass.
command_buffer_builder.end_render_pass().unwrap();
// Build command buffer
// Build command buffer.
let command_buffer = command_buffer_builder.build().unwrap();
// Execute primary command buffer
// Execute primary command buffer.
let after_future = before_future
.then_execute(self.gfx_queue.clone(), command_buffer)
.unwrap();

View File

@ -7,67 +7,64 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Multisampling anti-aliasing example, using a render pass resolve.
//!
//! # Introduction to multisampling
//!
//! When you draw an object on an image, this object occupies a certain set of pixels. Each pixel
//! of the image is either fully covered by the object, or not covered at all. There is no such
//! thing as a pixel that is half-covered by the object that you're drawing. What this means is
//! that you will sometimes see a "staircase effect" at the border of your object, also called
//! aliasing.
//!
//! The root cause of aliasing is that the resolution of the image is not high enough. If you
//! increase the size of the image you're drawing to, this effect will still exist but will be
//! much less visible.
//!
//! In order to decrease aliasing, some games and programs use what we call "Super-Sampling Anti
//! Aliasing" (SSAA). For example instead of drawing to an image of size 1024x1024, you draw to an
//! image of size 4096x4096. Then at the end, you scale down your image to 1024x1024 by merging
//! nearby pixels. Since the intermediate image is 4 times larger than the destination, this would
//! be x4 SSAA.
//!
//! However this technique is very expensive in terms of GPU power. The fragment shader and all
//! its calculations has to run four times more often.
//!
//! So instead of SSAA, a common alternative is MSAA (MultiSampling Anti Aliasing). The base
//! principle is more or less the same: you draw to an image of a larger dimension, and then at
//! the end you scale it down to the final size. The difference is that the fragment shader is
//! only run once per pixel of the final size, and its value is duplicated to fill to all the
//! pixels of the intermediate image that are covered by the object.
//!
//! For example, let's say that you use x4 MSAA, you draw to an intermediate image of size
//! 4096x4096, and your object covers the whole image. With MSAA, the fragment shader will only
//! be 1,048,576 times (1024 * 1024), compared to 16,777,216 times (4096 * 4096) with 4x SSAA.
//! Then the output of each fragment shader invocation is copied in each of the four pixels of the
//! intermediate image that correspond to each pixel of the final image.
//!
//! Now, let's say that your object doesn't cover the whole image. In this situation, only the
//! pixels of the intermediate image that are covered by the object will receive the output of the
//! fragment shader.
//!
//! Because of the way it works, this technique requires direct support from the hardware,
//! contrary to SSAA which can be done on any machine.
//!
//! # Multisampled images
//!
//! Using MSAA with Vulkan is done by creating a regular image, but with a number of samples per
//! pixel different from 1. For example if you want to use 4x MSAA, you should create an image with
//! 4 samples per pixel. Internally this image will have 4 times as many pixels as its dimensions
//! would normally require, but this is handled transparently for you. Drawing to a multisampled
//! image is exactly the same as drawing to a regular image.
//!
//! However multisampled images have some restrictions, for example you can't show them on the
//! screen (swapchain images are always single-sampled), and you can't copy them into a buffer.
//! Therefore when you have finished drawing, you have to blit your multisampled image to a
//! non-multisampled image. This operation is not a regular blit (blitting a multisampled image is
//! an error), instead it is called *resolving* the image.
//!
// Multisampling anti-aliasing example, using a render pass resolve.
//
// # Introduction to multisampling
//
// When you draw an object on an image, this object occupies a certain set of pixels. Each pixel of
// the image is either fully covered by the object, or not covered at all. There is no such thing
// as a pixel that is half-covered by the object that you're drawing. What this means is that you
// will sometimes see a "staircase effect" at the border of your object, also called aliasing.
//
// The root cause of aliasing is that the resolution of the image is not high enough. If you
// increase the size of the image you're drawing to, this effect will still exist but will be much
// less visible.
//
// In order to decrease aliasing, some games and programs use what we call "SuperSample Anti-
// Aliasing" (SSAA). For example instead of drawing to an image of size 1024x1024, you draw to an
// image of size 2048x2048. Then at the end, you scale down your image to 1024x1024 by merging
// nearby pixels. Since the intermediate image is 4 times larger than the destination, this would
// be 4x SSAA.
//
// However this technique is very expensive in terms of GPU power. The fragment shader and all its
// calculations have to run four times more often.
//
// So instead of SSAA, a common alternative is MSAA (MultiSample Anti-Aliasing). The base principle
// is more or less the same: you draw to an image of a larger dimension, and then at the end you
// scale it down to the final size. The difference is that the fragment shader is only run once per
// pixel of the final size, and its value is duplicated to fill all the pixels of the
// intermediate image that are covered by the object.
//
// For example, let's say that you use 4x MSAA, you draw to an intermediate image of size
// 2048x2048, and your object covers the whole image. With MSAA, the fragment shader will only be
// run 1,048,576 times (1024 * 1024), compared to 4,194,304 times (2048 * 2048) with 4x SSAA. Then
// the output of each fragment shader invocation is copied in each of the four pixels of the
// intermediate image that correspond to each pixel of the final image.
//
// Now, let's say that your object doesn't cover the whole image. In this situation, only the
// pixels of the intermediate image that are covered by the object will receive the output of the
// fragment shader.
//
// Because of the way it works, this technique requires direct support from the hardware, contrary
// to SSAA which can be done on any machine.
//
// # Multisampled images
//
// Using MSAA with Vulkan is done by creating a regular image, but with a number of samples per
// pixel different from 1. For example if you want to use 4x MSAA, you should create an image with
// 4 samples per pixel. Internally this image will have 4 times as many pixels as its dimensions
// would normally require, but this is handled transparently for you. Drawing to a multisampled
// image is exactly the same as drawing to a regular image.
//
// However multisampled images have some restrictions, for example you can't show them on the
// screen (swapchain images are always single-sampled), and you can't copy them into a buffer.
// Therefore when you have finished drawing, you have to blit your multisampled image to a
// non-multisampled image. This operation is not a regular blit (blitting a multisampled image is
// an error), instead it is called *resolving* the image.
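As a rough sketch of how this maps onto vulkano's render pass macro (the attachment layout mirrors the one used further down; the name `intermediary` and the `resolve` entry are assumptions about the part of the example not shown in this hunk):

let render_pass = vulkano::single_pass_renderpass!(
    device.clone(),
    attachments: {
        // The multisampled intermediary image we actually draw to.
        intermediary: {
            load: Clear,
            store: DontCare,
            format: Format::R8G8B8A8_UNORM,
            samples: 4,
        },
        // The single-sampled final image the intermediary gets resolved into.
        color: {
            load: DontCare,
            store: Store,
            format: Format::R8G8B8A8_UNORM,
            samples: 1,
        }
    },
    pass: {
        color: [intermediary],
        depth_stencil: {},
        // The resolve happens automatically at the end of the render pass.
        resolve: [color],
    }
)
.unwrap();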
use bytemuck::{Pod, Zeroable};
use std::{fs::File, io::BufWriter, path::Path};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
CopyImageToBufferInfo, PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -101,7 +98,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -135,7 +131,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -195,14 +191,16 @@ fn main() {
load: Clear,
store: DontCare,
format: Format::R8G8B8A8_UNORM,
samples: 4, // This has to match the image definition.
// This has to match the image definition.
samples: 4,
},
// The second framebuffer attachment is the final image.
color: {
load: DontCare,
store: Store,
format: Format::R8G8B8A8_UNORM,
samples: 1, // Same here, this has to match.
// Same here, this has to match.
samples: 1,
}
},
pass: {
@ -230,30 +228,30 @@ fn main() {
.unwrap();
// Here is the "end" of the multisampling example, as starting from here everything is the same
// as in any other example.
// The pipeline, vertex buffer, and command buffer are created in exactly the same way as
// without multisampling.
// At the end of the example, we copy the content of `image` (ie. the final image) to a buffer,
// then read the content of that buffer and save it to a PNG file.
// as in any other example. The pipeline, vertex buffer, and command buffer are created in
// exactly the same way as without multisampling. At the end of the example, we copy the
// content of `image` (i.e. the final image) to a buffer, then read the content of that buffer
// and save it to a PNG file.
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -261,15 +259,15 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
let vs = vs::load(device.clone()).unwrap();
let fs = fs::load(device.clone()).unwrap();
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],

View File

@ -12,14 +12,13 @@
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming
// and that you want to learn Vulkan. This means that for example it won't go into details about
// what a vertex or a shader is.
// This example assumes that you are already more or less familiar with graphics programming and
// that you want to learn Vulkan. This means that for example it won't go into details about what a
// vertex or a shader is.
use bytemuck::{Pod, Zeroable};
use std::{collections::HashMap, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderPassBeginInfo, SubpassContents,
@ -54,7 +53,7 @@ use winit::{
window::{Window, WindowBuilder},
};
// A struct to contain resources related to a window
/// A struct to contain resources related to a window.
struct WindowSurface {
surface: Arc<Surface>,
swapchain: Arc<Swapchain>,
@ -70,7 +69,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -78,18 +76,20 @@ fn main() {
.unwrap();
let event_loop = EventLoop::new();
// A hashmap that contains all of our created windows and their resources
// A hashmap that contains all of our created windows and their resources.
let mut window_surfaces = HashMap::new();
let surface = WindowBuilder::new()
.build_vk_surface(&event_loop, instance.clone())
.unwrap();
// Use the window's id as a means to access it from the hashmap
// Use the window's id as a means to access it from the hashmap.
let window = surface.object().unwrap().downcast_ref::<Window>().unwrap();
let window_id = window.id();
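As a sketch of the per-window bookkeeping used below (field values are placeholders; the swapchain, framebuffers and future are assumed to be created as they are later in this example, and `recreate_swapchain` is assumed to be a plain bool flag):

// Each window's resources are stored under its winit `WindowId`...
window_surfaces.insert(
    window_id,
    WindowSurface {
        surface: surface.clone(),
        swapchain,
        recreate_swapchain: false,
        framebuffers,
        previous_frame_end,
    },
);

// ...and fetched back whenever that particular window needs to be redrawn:
let ws = window_surfaces.get_mut(&window_id).unwrap();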
// Find the device and a queue.
// TODO: it is assumed the device, queue, and surface surface_capabilities are the same for all windows
// TODO: it is assumed that the device, queue, and surface capabilities are the same for all
// windows.
let (device, queue, surface_caps) = {
let device_extensions = DeviceExtensions {
@ -123,7 +123,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -147,8 +147,7 @@ fn main() {
(device, queues.next().unwrap(), surface_capabilities)
};
// The swapchain and framebuffer images for this perticular window
// The swapchain and framebuffer images for this particular window.
let (swapchain, images) = {
let image_format = Some(
device
@ -180,8 +179,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -211,7 +210,7 @@ fn main() {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -219,14 +218,14 @@ fn main() {
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -234,7 +233,7 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
@ -379,11 +378,11 @@ fn main() {
}
Event::RedrawRequested(window_id) => {
let WindowSurface {
ref surface,
ref mut swapchain,
ref mut recreate_swapchain,
ref mut framebuffers,
ref mut previous_frame_end,
surface,
swapchain,
recreate_swapchain,
framebuffers,
previous_frame_end,
} = window_surfaces.get_mut(&window_id).unwrap();
let window = surface.object().unwrap().downcast_ref::<Window>().unwrap();
@ -401,7 +400,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
*swapchain = new_swapchain;
@ -417,7 +416,7 @@ fn main() {
*recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -472,7 +471,7 @@ fn main() {
*previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
*previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}

View File

@ -12,12 +12,15 @@ use crate::{
WINDOW2_HEIGHT, WINDOW2_WIDTH, WINDOW_HEIGHT, WINDOW_WIDTH,
};
use std::{collections::HashMap, sync::Arc};
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
use vulkano::memory::allocator::StandardMemoryAllocator;
use vulkano::{device::Queue, format::Format};
use vulkano_util::context::{VulkanoConfig, VulkanoContext};
use vulkano_util::window::{VulkanoWindows, WindowDescriptor};
use vulkano::{
command_buffer::allocator::StandardCommandBufferAllocator,
descriptor_set::allocator::StandardDescriptorSetAllocator, device::Queue, format::Format,
memory::allocator::StandardMemoryAllocator,
};
use vulkano_util::{
context::{VulkanoConfig, VulkanoContext},
window::{VulkanoWindows, WindowDescriptor},
};
use winit::{event_loop::EventLoop, window::WindowId};
pub struct RenderPipeline {
@ -68,7 +71,7 @@ pub struct App {
impl App {
pub fn open(&mut self, event_loop: &EventLoop<()>) {
// Create windows & pipelines
// Create windows & pipelines.
let id1 = self.windows.create_window(
event_loop,
&self.context,
@ -94,7 +97,7 @@ impl App {
self.pipelines.insert(
id1,
RenderPipeline::new(
// Use same queue.. for synchronization
// Use the same queue for synchronization.
self.context.graphics_queue().clone(),
self.context.graphics_queue().clone(),
[

View File

@ -10,28 +10,28 @@
use cgmath::Vector2;
use rand::Rng;
use std::sync::Arc;
use vulkano::buffer::{BufferAllocateInfo, Subbuffer};
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
use vulkano::image::{ImageUsage, StorageImage};
use vulkano::memory::allocator::MemoryAllocator;
use vulkano::{
buffer::{Buffer, BufferUsage},
command_buffer::{AutoCommandBufferBuilder, CommandBufferUsage, PrimaryAutoCommandBuffer},
descriptor_set::{PersistentDescriptorSet, WriteDescriptorSet},
buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
PrimaryAutoCommandBuffer,
},
descriptor_set::{
allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet,
},
device::Queue,
format::Format,
image::ImageAccess,
image::{ImageAccess, ImageUsage, StorageImage},
memory::allocator::MemoryAllocator,
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
sync::GpuFuture,
};
use vulkano_util::renderer::DeviceImageView;
/// Pipeline holding double buffered grid & color image.
/// Grids are used to calculate the state, and color image is used to show the output.
/// Because each step we determine state in parallel, we need to write the output to
/// another grid. Otherwise the state would not be correctly determined as one thread might read
/// data that was just written by another thread
/// Pipeline holding a double-buffered grid & color image. The grids are used to calculate the
/// state, and the color image is used to show the output. Because we determine the state in
/// parallel on each step, we need to write the output to another grid. Otherwise the state would
/// not be determined correctly, as one shader invocation might read data that was just written by
/// another.
pub struct GameOfLifeComputePipeline {
compute_queue: Arc<Queue>,
compute_life_pipeline: Arc<ComputePipeline>,
@ -126,13 +126,15 @@ impl GameOfLifeComputePipeline {
)
.unwrap();
// Dispatch will mutate the builder adding commands which won't be sent before we build the command buffer
// after dispatches. This will minimize the commands we send to the GPU. For example, we could be doing
// tens of dispatches here depending on our needs. Maybe we wanted to simulate 10 steps at a time...
// Dispatch will mutate the builder, adding commands which won't be submitted before we build
// the command buffer after the dispatches. This minimizes the work we send to the GPU. For
// example, we could be doing tens of dispatches here depending on our needs. Maybe we wanted
// to simulate 10 steps at a time...
// First compute the next state
// First compute the next state.
self.dispatch(&mut builder, life_color, dead_color, 0);
// Then color based on the next state
// Then color based on the next state.
self.dispatch(&mut builder, life_color, dead_color, 1);
let command_buffer = builder.build().unwrap();
@ -141,13 +143,13 @@ impl GameOfLifeComputePipeline {
.unwrap();
let after_pipeline = finished.then_signal_fence_and_flush().unwrap().boxed();
// Swap input and output so the output becomes the input for next frame
// Swap input and output so the output becomes the input for next frame.
std::mem::swap(&mut self.life_in, &mut self.life_out);
after_pipeline
}
/// Build the command for a dispatch.
/// Builds the command for a dispatch.
fn dispatch(
&self,
builder: &mut AutoCommandBufferBuilder<
@ -156,10 +158,10 @@ impl GameOfLifeComputePipeline {
>,
life_color: [f32; 4],
dead_color: [f32; 4],
// Step determines whether we color or compute life (see branch in the shader)s
// Step determines whether we color or compute life (see the branch in the shader).
step: i32,
) {
// Resize image if needed
// Resize image if needed.
let img_dims = self.image.image().dimensions().width_height();
let pipeline_layout = self.compute_life_pipeline.layout();
let desc_layout = pipeline_layout.set_layouts().get(0).unwrap();
@ -174,7 +176,7 @@ impl GameOfLifeComputePipeline {
)
.unwrap();
let push_constants = compute_life_cs::ty::PushConstants {
let push_constants = compute_life_cs::PushConstants {
life_color,
dead_color,
step,
@ -191,7 +193,7 @@ impl GameOfLifeComputePipeline {
mod compute_life_cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
@ -235,15 +237,16 @@ void compute_life() {
if (life_out[get_index(down_left)] == 1) { alive_count += 1; }
if (life_out[get_index(left)] == 1) { alive_count += 1; }
// Dead becomes alive
// Dead becomes alive.
if (life_out[index] == 0 && alive_count == 3) {
life_out[index] = 1;
} // Becomes dead
}
// Becomes dead.
else if (life_out[index] == 1 && alive_count < 2 || alive_count > 3) {
life_out[index] = 0;
} // Else Do nothing
}
// Else do nothing.
else {
life_out[index] = life_in[index];
}
}
@ -264,6 +267,7 @@ void main() {
} else {
compute_color();
}
}",
}
",
}
}
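For readers less familiar with the cellular automaton itself, the effective per-cell rule the shader above encodes can be written as a plain function (a CPU-side sketch only; `current` is the cell's current state and `alive_count` its number of live neighbours):

fn next_state(current: u32, alive_count: u32) -> u32 {
    if current == 0 && alive_count == 3 {
        1 // Dead becomes alive.
    } else if current == 1 && (alive_count < 2 || alive_count > 3) {
        0 // Becomes dead.
    } else {
        current // Else do nothing.
    }
}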

View File

@ -7,6 +7,15 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// A multi windowed game of life application. You could use this to learn:
//
// - how to handle multiple window inputs,
// - how to draw on a canvas,
// - how to organize compute shader with graphics,
// - how to do a cellular automata simulation using compute shaders.
//
// The possibilities are limitless. ;)
mod app;
mod game_of_life;
mod pixels_draw;
@ -23,13 +32,6 @@ use winit::{
platform::run_return::EventLoopExtRunReturn,
};
// A multi windowed game of life application. You could use this to learn:
// - how to handle multiple window inputs,
// - how to draw on a canvas
// - how to organize compute shader with graphics
// - how to do a cellular automata simulation using compute shaders
// The possibilities are limitless ;)
pub const WINDOW_WIDTH: f32 = 1024.0;
pub const WINDOW_HEIGHT: f32 = 1024.0;
pub const WINDOW2_WIDTH: f32 = 512.0;
@ -37,21 +39,25 @@ pub const WINDOW2_HEIGHT: f32 = 512.0;
pub const SCALING: f32 = 2.0;
fn main() {
println!("Welcome to Vulkano Game of Life\n Use Mouse to draw life on the grid(s)\n");
// Create event loop
println!("Welcome to Vulkano Game of Life\nUse the mouse to draw life on the grid(s)\n");
// Create event loop.
let mut event_loop = EventLoop::new();
// Create app with vulkano context
// Create app with vulkano context.
let mut app = App::default();
app.open(&event_loop);
// Time & inputs...
let mut time = Instant::now();
let mut cursor_pos = Vector2::new(0.0, 0.0);
// An extremely crude way to handle input state... But works for this example.
// An extremely crude way to handle input state... but works for this example.
let mut mouse_is_pressed_w1 = false;
let mut mouse_is_pressed_w2 = false;
loop {
// Event handling
// Event handling.
if !handle_events(
&mut event_loop,
&mut app,
@ -61,14 +67,16 @@ fn main() {
) {
break;
}
// Draw life on windows if mouse is down
// Draw life on windows if mouse is down.
draw_life(
&mut app,
cursor_pos,
mouse_is_pressed_w1,
mouse_is_pressed_w2,
);
// Compute life & render 60fps
// Compute life & render 60fps.
if (Instant::now() - time).as_secs_f64() > 1.0 / 60.0 {
compute_then_render_per_window(&mut app);
time = Instant::now();
@ -76,7 +84,7 @@ fn main() {
}
}
/// Handle events and return `bool` if we should quit
/// Handles events and returns a `bool` indicating if we should quit.
fn handle_events(
event_loop: &mut EventLoop<()>,
app: &mut App,
@ -85,6 +93,7 @@ fn handle_events(
mouse_pressed_w2: &mut bool,
) -> bool {
let mut is_running = true;
event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match &event {
@ -95,20 +104,21 @@ fn handle_events(
if *window_id == app.windows.primary_window_id().unwrap() {
is_running = false;
} else {
// Destroy window by removing its renderer...
// Destroy window by removing its renderer.
app.windows.remove_renderer(*window_id);
app.pipelines.remove(window_id);
}
}
// Resize window and its images...
// Resize window and its images.
WindowEvent::Resized(..) | WindowEvent::ScaleFactorChanged { .. } => {
let vulkano_window = app.windows.get_renderer_mut(*window_id).unwrap();
vulkano_window.resize();
}
// Handle mouse position events.
WindowEvent::CursorMoved { position, .. } => {
*cursor_pos = Vector2::new(position.x as f32, position.y as f32)
}
// Mouse button event
// Handle mouse button events.
WindowEvent::MouseInput { state, button, .. } => {
let mut mouse_pressed = false;
if button == &MouseButton::Left && state == &ElementState::Pressed {
@ -129,6 +139,7 @@ fn handle_events(
_ => (),
}
});
is_running
}
@ -146,13 +157,15 @@ fn draw_life(
if id != &primary_window_id && !mouse_is_pressed_w2 {
continue;
}
let window_size = window.window_size();
let compute_pipeline = &mut app.pipelines.get_mut(id).unwrap().compute;
let mut normalized_pos = Vector2::new(
(cursor_pos.x / window_size[0]).clamp(0.0, 1.0),
(cursor_pos.y / window_size[1]).clamp(0.0, 1.0),
);
// flip y
// Flip y.
normalized_pos.y = 1.0 - normalized_pos.y;
let image_size = compute_pipeline
.color_image()
@ -166,7 +179,7 @@ fn draw_life(
}
}
/// Compute and render per window
/// Compute and render per window.
fn compute_then_render_per_window(app: &mut App) {
let primary_window_id = app.windows.primary_window_id().unwrap();
for (window_id, window_renderer) in app.windows.iter_mut() {
@ -179,14 +192,14 @@ fn compute_then_render_per_window(app: &mut App) {
}
}
/// Compute game of life, then display result on target image
/// Compute game of life, then display result on target image.
fn compute_then_render(
window_renderer: &mut VulkanoWindowRenderer,
pipeline: &mut RenderPipeline,
life_color: [f32; 4],
dead_color: [f32; 4],
) {
// Skip this window when minimized
// Skip this window when minimized.
match window_renderer.window_size() {
[w, h] => {
if w == 0.0 || h == 0.0 {
@ -195,7 +208,7 @@ fn compute_then_render(
}
}
// Start frame
// Start the frame.
let before_pipeline_future = match window_renderer.acquire() {
Err(e) => {
println!("{e}");
@ -204,12 +217,12 @@ fn compute_then_render(
Ok(future) => future,
};
// Compute
// Compute.
let after_compute = pipeline
.compute
.compute(before_pipeline_future, life_color, dead_color);
// Render
// Render.
let color_image = pipeline.compute.color_image();
let target_image = window_renderer.swapchain_image_view();
@ -217,6 +230,6 @@ fn compute_then_render(
.place_over_frame
.render(after_compute, color_image, target_image);
// Finish frame. Wait for the future so resources are not in use when we render
// Finish the frame. Wait for the future so resources are not in use when we render.
window_renderer.present(after_render, true);
}


@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder,
CommandBufferInheritanceInfo, CommandBufferUsage, SecondaryAutoCommandBuffer,
@ -33,9 +32,9 @@ use vulkano::{
sampler::{Filter, Sampler, SamplerAddressMode, SamplerCreateInfo, SamplerMipmapMode},
};
/// Vertex for textured quads
/// Vertex for textured quads.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
pub struct TexturedVertex {
#[format(R32G32_SFLOAT)]
pub position: [f32; 2],
@ -67,7 +66,7 @@ pub fn textured_quad(width: f32, height: f32) -> (Vec<TexturedVertex>, Vec<u32>)
)
}
/// A subpass pipeline that fills a quad over frame
/// A subpass pipeline that fills a quad over the frame.
pub struct PixelsDrawPipeline {
gfx_queue: Arc<Queue>,
subpass: Subpass,
@ -160,7 +159,7 @@ impl PixelsDrawPipeline {
.unwrap()
}
/// Draw input `image` over a quad of size -1.0 to 1.0
/// Draws input `image` over a quad of size -1.0 to 1.0.
pub fn draw(
&self,
viewport_dimensions: [u32; 2],
@ -204,7 +203,7 @@ impl PixelsDrawPipeline {
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location=0) in vec2 position;
layout(location=1) in vec2 tex_coords;
@ -215,14 +214,14 @@ void main() {
gl_Position = vec4(position, 0.0, 1.0);
f_tex_coords = tex_coords;
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 v_tex_coords;
@ -233,6 +232,6 @@ layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, v_tex_coords);
}
"
",
}
}


@ -24,7 +24,7 @@ use vulkano::{
};
use vulkano_util::renderer::{DeviceImageView, SwapchainImageView};
/// A render pass which places an incoming image over frame filling it
/// A render pass which places an incoming image over the frame, filling it.
pub struct RenderPassPlaceOverFrame {
gfx_queue: Arc<Queue>,
render_pass: Arc<RenderPass>,
@ -72,8 +72,8 @@ impl RenderPassPlaceOverFrame {
}
}
/// Place view exactly over swapchain image target.
/// Texture draw pipeline uses a quad onto which it places the view.
/// Places the view exactly over the target swapchain image. The texture draw pipeline uses a
/// quad onto which it places the view.
pub fn render<F>(
&self,
before_future: F,
@ -83,9 +83,10 @@ impl RenderPassPlaceOverFrame {
where
F: GpuFuture + 'static,
{
// Get dimensions
// Get the dimensions.
let img_dims = target.image().dimensions();
// Create framebuffer (must be in same order as render pass description in `new`
// Create the framebuffer.
let framebuffer = Framebuffer::new(
self.render_pass.clone(),
FramebufferCreateInfo {
@ -94,14 +95,16 @@ impl RenderPassPlaceOverFrame {
},
)
.unwrap();
// Create primary command buffer builder
// Create a primary command buffer builder.
let mut command_buffer_builder = AutoCommandBufferBuilder::primary(
&self.command_buffer_allocator,
self.gfx_queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
// Begin render pass
// Begin the render pass.
command_buffer_builder
.begin_render_pass(
RenderPassBeginInfo {
@ -111,17 +114,22 @@ impl RenderPassPlaceOverFrame {
SubpassContents::SecondaryCommandBuffers,
)
.unwrap();
// Create secondary command buffer from texture pipeline & send draw commands
// Create a secondary command buffer from the texture pipeline & send draw commands.
let cb = self
.pixels_draw_pipeline
.draw(img_dims.width_height(), view);
// Execute above commands (subpass)
// Execute above commands (subpass).
command_buffer_builder.execute_commands(cb).unwrap();
// End render pass
// End the render pass.
command_buffer_builder.end_render_pass().unwrap();
// Build command buffer
// Build the command buffer.
let command_buffer = command_buffer_builder.build().unwrap();
// Execute primary command buffer
// Execute primary command buffer.
let after_future = before_future
.then_execute(self.gfx_queue.clone(), command_buffer)
.unwrap();


@ -7,16 +7,14 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! This example demonstrates using the `VK_KHR_multiview` extension to render to multiple
//! layers of the framebuffer in one render pass. This can significantly improve performance
//! in cases where multiple perspectives or cameras are very similar like in virtual reality
//! or other types of stereoscopic rendering where the left and right eye only differ
//! in a small position offset.
// This example demonstrates using the `VK_KHR_multiview` extension to render to multiple layers of
// the framebuffer in one render pass. This can significantly improve performance in cases where
// multiple perspectives or cameras are very similar like in virtual reality or other types of
// stereoscopic rendering where the left and right eye only differ in a small position offset.
use bytemuck::{Pod, Zeroable};
use std::{fs::File, io::BufWriter, path::Path};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, BufferImageCopy,
CommandBufferUsage, CopyImageToBufferInfo, RenderPassBeginInfo, SubpassContents,
@ -54,10 +52,10 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: InstanceExtensions {
khr_get_physical_device_properties2: true, // required to get multiview limits
// Required to get multiview limits.
khr_get_physical_device_properties2: true,
..InstanceExtensions::empty()
},
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -68,25 +66,23 @@ fn main() {
..DeviceExtensions::empty()
};
let features = Features {
// enabling the `multiview` feature will use the `VK_KHR_multiview` extension on
// Vulkan 1.0 and the device feature on Vulkan 1.1+
// Enabling the `multiview` feature will use the `VK_KHR_multiview` extension on Vulkan 1.0
// and the device feature on Vulkan 1.1+.
multiview: true,
..Features::empty()
};
let (physical_device, queue_family_index) = instance.enumerate_physical_devices().unwrap()
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| p.supported_extensions().contains(&device_extensions))
.filter(|p| p.supported_features().contains(&features))
.filter(|p| {
p.supported_extensions().contains(&device_extensions)
})
.filter(|p| {
p.supported_features().contains(&features)
})
.filter(|p| {
// This example renders to two layers of the framebuffer using the multiview
// extension so we check that at least two views are supported by the device.
// Not checking this on a device that doesn't support two views
// will lead to a runtime error when creating the `RenderPass`.
// The `max_multiview_view_count` function will return `None` when the
// `VK_KHR_get_physical_device_properties2` instance extension has not been enabled.
// This example renders to two layers of the framebuffer using the multiview extension
// so we check that at least two views are supported by the device. Not checking this
// on a device that doesn't support two views will lead to a runtime error when
// creating the `RenderPass`. The `max_multiview_view_count` function will return
// `None` when the `VK_KHR_get_physical_device_properties2` instance extension has not
// been enabled.
p.properties().max_multiview_view_count.unwrap_or(0) >= 2
})
.filter_map(|p| {
@ -103,14 +99,17 @@ fn main() {
PhysicalDeviceType::Other => 4,
_ => 5,
})
// A real application should probably fall back to rendering the framebuffer layers
// in multiple passes when multiview isn't supported.
.expect("No device supports two multiview views or the VK_KHR_get_physical_device_properties2 instance extension has not been loaded");
// A real application should probably fall back to rendering the framebuffer layers in
// multiple passes when multiview isn't supported.
.expect(
"no device supports two multiview views or the \
`VK_KHR_get_physical_device_properties2` instance extension has not been loaded",
);
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -147,8 +146,8 @@ fn main() {
let image_view = ImageView::new_default(image.clone()).unwrap();
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -175,16 +174,15 @@ fn main() {
)
.unwrap();
// Note the `#extension GL_EXT_multiview : enable` that enables the multiview extension
// for the shader and the use of `gl_ViewIndex` which contains a value based on which
// view the shader is being invoked for.
// In this example `gl_ViewIndex` is used toggle a hardcoded offset for vertex positions
// but in a VR application you could easily use it as an index to a uniform array
// that contains the transformation matrices for the left and right eye.
// Note the `#extension GL_EXT_multiview : enable` that enables the multiview extension for the
// shader and the use of `gl_ViewIndex` which contains a value based on which view the shader
// is being invoked for. In this example `gl_ViewIndex` is used to toggle a hardcoded offset
// for vertex positions but in a VR application you could easily use it as an index to a
// uniform array that contains the transformation matrices for the left and right eye.
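// As a sketch of that VR-style usage (not used by this example; the `Eyes` block, its binding
// and the module name are made up for illustration), `gl_ViewIndex` could index a per-eye
// matrix held in a uniform buffer:
#[allow(dead_code)]
mod vs_per_eye {
    vulkano_shaders::shader! {
        ty: "vertex",
        src: r"
            #version 450
            #extension GL_EXT_multiview : enable

            layout(location = 0) in vec2 position;

            // One view-projection matrix per view (one per eye).
            layout(set = 0, binding = 0) uniform Eyes {
                mat4 view_proj[2];
            };

            void main() {
                gl_Position = view_proj[gl_ViewIndex] * vec4(position, 0.0, 1.0);
            }
        ",
    }
}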
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
#extension GL_EXT_multiview : enable
@ -193,14 +191,14 @@ fn main() {
void main() {
gl_Position = vec4(position, 0.0, 1.0) + gl_ViewIndex * vec4(0.25, 0.25, 0.0, 0.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -208,7 +206,7 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
@ -228,8 +226,8 @@ fn main() {
..Default::default()
}],
subpasses: vec![SubpassDescription {
// the view mask indicates which layers of the framebuffer should be rendered for each
// subpass
// The view mask indicates which layers of the framebuffer should be rendered for each
// subpass.
view_mask: 0b11,
color_attachments: vec![Some(AttachmentReference {
attachment: 0,
@ -238,8 +236,8 @@ fn main() {
})],
..Default::default()
}],
// the correlated view masks indicate sets of views that may be more efficient to render
// concurrently
// The correlated view masks indicate sets of views that may be more efficient to render
// concurrently.
correlated_view_masks: vec![0b11],
..Default::default()
};
@ -299,8 +297,8 @@ fn main() {
)
.unwrap();
// drawing commands are broadcast to each view in the view mask of the active renderpass
// which means only a single draw call is needed to draw to multiple layers of the framebuffer
// Drawing commands are broadcast to each view in the view mask of the active renderpass which
// means only a single draw call is needed to draw to multiple layers of the framebuffer.
builder
.begin_render_pass(
RenderPassBeginInfo {
@ -317,7 +315,7 @@ fn main() {
.end_render_pass()
.unwrap();
// copy the image layers to different buffers to save them as individual images to disk
// Copy the image layers to different buffers to save them as individual images to disk.
builder
.copy_image_to_buffer(CopyImageToBufferInfo {
regions: [BufferImageCopy {
@ -356,7 +354,7 @@ fn main() {
future.wait(None).unwrap();
// write each layer to its own file
// Write each layer to its own file.
write_image_buffer_to_file(
buffer1,
"multiview1.png",


@ -7,14 +7,13 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// This is a modification of the triangle example, that demonstrates the basics of occlusion queries.
// Occlusion queries allow you to query whether, and sometimes how many, pixels pass the depth test
// in a range of draw calls.
// This is a modification of the triangle example that demonstrates the basics of occlusion
// queries. Occlusion queries allow you to query whether, and sometimes how many, pixels pass the
// depth test in a range of draw calls.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderPassBeginInfo, SubpassContents,
@ -59,7 +58,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -154,8 +152,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32B32_SFLOAT)]
position: [f32; 3],
@ -177,10 +175,9 @@ fn main() {
position: [0.25, -0.1, 0.5],
color: [1.0, 0.0, 0.0],
},
// The second triangle (cyan) is the same shape and position as the first,
// but smaller, and moved behind a bit.
// It should be completely occluded by the first triangle.
// (You can lower its z value to put it in front)
// The second triangle (cyan) is the same shape and position as the first, but smaller, and
// moved behind a bit. It should be completely occluded by the first triangle. (You can
// lower its z value to put it in front.)
Vertex {
position: [-0.25, -0.125, 0.6],
color: [0.0, 1.0, 1.0],
@ -193,9 +190,8 @@ fn main() {
position: [0.125, -0.05, 0.6],
color: [0.0, 1.0, 1.0],
},
// The third triangle (green) is the same shape and size as the first,
// but moved to the left and behind the second.
// It is partially occluded by the first two.
// The third triangle (green) is the same shape and size as the first, but moved to the
// left and behind the second. It is partially occluded by the first two.
Vertex {
position: [-0.25, -0.25, 0.7],
color: [0.0, 1.0, 0.0],
@ -234,18 +230,17 @@ fn main() {
)
.unwrap();
// Create a buffer on the CPU to hold the results of the three queries.
// Query results are always represented as either `u32` or `u64`.
// For occlusion queries, you always need one element per query. You can ask for the number of
// elements needed at runtime by calling `QueryType::result_len`.
// If you retrieve query results with `with_availability` enabled, then this array needs to
// be 6 elements long instead of 3.
// Create a buffer on the CPU to hold the results of the three queries. Query results are
// always represented as either `u32` or `u64`. For occlusion queries, you always need one
// element per query. You can ask for the number of elements needed at runtime by calling
// `QueryType::result_len`. If you retrieve query results with `with_availability` enabled,
// then this array needs to be 6 elements long instead of 3.
let mut query_results = [0u32; 3];
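// As noted above, retrieving results with `WITH_AVAILABILITY` needs one extra element per
// query, so for these three queries the buffer would be twice as long. A sketch of that
// variant (not used below; the variable name is illustrative only):
let _query_results_with_availability = [0u32; 3 * 2]; // 3 queries * (result + availability)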
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec3 position;
@ -257,14 +252,14 @@ fn main() {
v_color = color;
gl_Position = vec4(position, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec3 v_color;
@ -273,7 +268,7 @@ fn main() {
void main() {
f_color = vec4(v_color, 1.0);
}
"
",
}
}
@ -310,9 +305,9 @@ fn main() {
.viewport_state(ViewportState::viewport_dynamic_scissor_irrelevant())
.fragment_shader(fs.entry_point("main").unwrap(), ())
.render_pass(Subpass::from(render_pass.clone(), 0).unwrap())
// Enable depth testing, which is needed for occlusion queries to make sense at all.
// If you disable depth testing, every pixel is considered to pass the depth test, so
// every query will return a nonzero result.
// Enable depth testing, which is needed for occlusion queries to make sense at all. If you
// disable depth testing, every pixel is considered to pass the depth test, so every query
// will return a nonzero result.
.depth_stencil_state(DepthStencilState::simple_depth_test())
.build(device.clone())
.unwrap();
@ -365,7 +360,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -385,7 +380,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -402,8 +397,8 @@ fn main() {
// Beginning or resetting a query is unsafe for now.
unsafe {
builder
// A query must be reset before each use, including the first use.
// This must be done outside a render pass.
// A query must be reset before each use, including the first use. This must be
// done outside a render pass.
.reset_query_pool(query_pool.clone(), 0..3)
.unwrap()
.set_viewport(0, [viewport.clone()])
@ -418,14 +413,14 @@ fn main() {
SubpassContents::Inline,
)
.unwrap()
// Begin query 0, then draw the red triangle.
// Enabling the `QueryControlFlags::PRECISE` flag would give exact numeric
// results. This needs the `occlusion_query_precise` feature to be enabled on
// the device.
// Begin query 0, then draw the red triangle. Enabling the
// `QueryControlFlags::PRECISE` flag would give exact numeric results. This
// needs the `occlusion_query_precise` feature to be enabled on the device.
.begin_query(
query_pool.clone(),
0,
QueryControlFlags::empty(), // QueryControlFlags::PRECISE
QueryControlFlags::empty(),
// QueryControlFlags::PRECISE,
)
.unwrap()
.bind_vertex_buffers(0, triangle1.clone())
@ -477,16 +472,15 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
// Retrieve the query results.
// This copies the results to a variable on the CPU. You can also use the
// `copy_query_pool_results` function on a command buffer to write results to a
// Vulkano buffer. This could then be used to influence draw operations further down
// the line, either in the same frame or a future frame.
// Retrieve the query results. This copies the results to a variable on the CPU. You
// can also use the `copy_query_pool_results` function on a command buffer to write
// results to a Vulkano buffer. This could then be used to influence draw operations
// further down the line, either in the same frame or a future frame.
#[rustfmt::skip]
query_pool
.queries_range(0..3)
@ -494,21 +488,21 @@ fn main() {
.get_results(
&mut query_results,
// Block the function call until the results are available.
// Note: if not all the queries have actually been executed, then this
// will wait forever for something that never happens!
// NOTE: If not all the queries have actually been executed, then this will
// wait forever for something that never happens!
QueryResultFlags::WAIT
// Enable this flag to give partial results if available, instead of waiting
// for the full results.
// | QueryResultFlags::PARTIAL
// Blocking and waiting will ensure the results are always available after
// the function returns.
// Blocking and waiting will ensure the results are always available after the
// function returns.
//
// If you disable waiting, then this flag can be enabled to include the
// availability of each query's results. You need one extra element per
// query in your `query_results` buffer for this. This element will
// be filled with a zero/nonzero value indicating availability.
// availability of each query's results. You need one extra element per query
// in your `query_results` buffer for this. This element will be filled with a
// zero/nonzero value indicating availability.
// | QueryResultFlags::WITH_AVAILABILITY
)
.unwrap();


@ -9,23 +9,20 @@
// This example demonstrates how to use pipeline caching.
//
// Using a PipelineCache can improve performance significantly,
// by checking if the requested pipeline exists in the cache and if so,
// return that pipeline directly or insert that new pipeline into the
// cache.
// Using a `PipelineCache` can improve performance significantly, by checking if the requested
// pipeline exists in the cache and, if so, returning that pipeline directly, or otherwise
// inserting the newly created pipeline into the cache.
//
// You can retrieve the data in the cache as a `Vec<u8>` and
// save that to a binary file. Later you can load that file and build a
// PipelineCache with the given data. Be aware that the Vulkan
// implementation does not check if the data is valid and vulkano
// currently does not either. Invalid data can lead to driver crashes
// or worse. Using the same cache data with a different GPU probably
// won't work, a simple driver update can lead to invalid data as well.
// To check if your data is valid you can find inspiration here:
// You can retrieve the data in the cache as a `Vec<u8>` and save that to a binary file. Later you
// can load that file and build a PipelineCache with the given data. Be aware that the Vulkan
// implementation does not check if the data is valid and vulkano currently does not either.
// Invalid data can lead to driver crashes or worse. Using the same cache data with a different GPU
// probably won't work, and a simple driver update can lead to invalid data as well. To check if your
// data is valid you can find inspiration here:
// https://zeux.io/2019/07/17/serializing-pipeline-cache/
//
// In the future, vulkano might implement those safety checks, but for
// now, you would have to do that yourself or trust the data and the user.
// In the future, vulkano might implement those safety checks, but for now, you would have to do
// that yourself or trust the data and the user.
use std::{
fs::{remove_file, rename, File},
@ -47,7 +44,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -82,7 +78,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
// Now initializing the device.
@ -102,33 +98,33 @@ fn main() {
// We are creating an empty PipelineCache to start somewhere.
let pipeline_cache = PipelineCache::empty(device.clone()).unwrap();
// We need to create the compute pipeline that describes our operation. We are using the
// shader from the basic-compute-shader example.
// We need to create the compute pipeline that describes our operation. We are using the shader
// from the basic-compute-shader example.
//
// If you are familiar with graphics pipeline, the principle is the same except that compute
// pipelines are much simpler to create.
//
// Pass the PipelineCache as an optional parameter to the ComputePipeline constructor.
// For GraphicPipelines you can use the GraphicPipelineBuilder that has a method
// `build_with_cache(cache: Arc<PipelineCache>)`
// Pass the `PipelineCache` as an optional parameter to the `ComputePipeline` constructor. For
// `GraphicsPipeline`s you can use the `GraphicsPipelineBuilder` that has a method
// `build_with_cache(cache: Arc<PipelineCache>)`.
let _pipeline = {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
uint idx = gl_GlobalInvocationID.x;
data.data[idx] *= 12;
data[idx] *= 12;
}
"
",
}
}
let shader = cs::load(device.clone()).unwrap();
@ -142,13 +138,12 @@ fn main() {
.unwrap()
};
// Normally you would use your pipeline for computing, but we just want to focus on the
// cache functionality.
// The cache works the same for a GraphicsPipeline, a ComputePipeline is just simpler to
// build.
// Normally you would use your pipeline for computing, but we just want to focus on the cache
// functionality. The cache works the same for a `GraphicsPipeline`; a `ComputePipeline` is
// just simpler to build.
//
// We are now going to retrieve the cache data into a Vec<u8> and save that to a file on
// our disk.
// We are now going to retrieve the cache data into a Vec<u8> and save that to a file on our
// disk.
if let Ok(data) = pipeline_cache.get_data() {
if let Ok(mut file) = File::create("pipeline_cache.bin.tmp") {
@ -160,13 +155,13 @@ fn main() {
}
}
// The PipelineCache is now saved to disk and can be loaded the next time the application
// is started. This way, the pipelines do not have to be rebuild and pipelines that might
// exist in the cache can be build far quicker.
// The `PipelineCache` is now saved to disk and can be loaded the next time the application is
// started. This way, the pipelines do not have to be rebuilt, and pipelines that might exist in
// the cache can be built far quicker.
//
// To load the cache from the file, we just need to load the data into a Vec<u8> and build
// the PipelineCache from that. Note that this function is currently unsafe as there are
// no checks, as it was mentioned at the start of this example.
// To load the cache from the file, we just need to load the data into a Vec<u8> and build the
// `PipelineCache` from that. Note that this function is currently unsafe as there are no
// checks, as it was mentioned at the start of this example.
let data = {
if let Ok(mut file) = File::open("pipeline_cache.bin") {
let mut data = Vec::new();
@ -187,14 +182,13 @@ fn main() {
PipelineCache::empty(device).unwrap()
};
// As the PipelineCache of the Vulkan implementation saves an opaque blob of data,
// there is no real way to know if the data is correct. There might be differences
// in the byte blob here, but it should still work.
// If it doesn't, please check if there is an issue describing this problem, and if
// not open a new one, on the GitHub page.
// As the `PipelineCache` of the Vulkan implementation saves an opaque blob of data, there is
// no real way to know if the data is correct. There might be differences in the byte blob
// here, but it should still work. If it doesn't, please check if there is an issue describing
// this problem, and if not, open a new one on the GitHub page.
assert_eq!(
pipeline_cache.get_data().unwrap(),
second_cache.get_data().unwrap()
second_cache.get_data().unwrap(),
);
println!("Success");
}
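// The validity check mentioned at the top of this example is left out above. A bare-minimum
// sketch of one (the function and parameter names are made up here; the expected values would
// come from the physical device's properties) is to parse the `VkPipelineCacheHeaderVersionOne`
// header that prefixes the blob, whose fields are stored least-significant byte first:
#[allow(dead_code)]
fn cache_header_matches(data: &[u8], vendor_id: u32, device_id: u32, cache_uuid: [u8; 16]) -> bool {
    // headerSize, headerVersion, vendorID and deviceID, followed by the 16-byte cache UUID.
    let u32_at = |offset: usize| u32::from_le_bytes(data[offset..offset + 4].try_into().unwrap());

    data.len() >= 32
        && u32_at(0) >= 32 // header size
        && u32_at(4) == 1 // VK_PIPELINE_CACHE_HEADER_VERSION_ONE
        && u32_at(8) == vendor_id
        && u32_at(12) == device_id
        && data[16..32] == cache_uuid
}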


@ -7,10 +7,10 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// Push constants are a small bank of values written directly to the command buffer
// and accessible in shaders. They allow the application to set values used in shaders
// without creating buffers or modifying and binding descriptor sets for each update.
// As a result, they are expected to outperform such memory-backed resource updates.
// Push constants are a small bank of values written directly to the command buffer and accessible
// in shaders. They allow the application to set values used in shaders without creating buffers or
// modifying and binding descriptor sets for each update. As a result, they are expected to
// outperform such memory-backed resource updates.
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
@ -36,7 +36,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -70,7 +69,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -90,7 +89,7 @@ fn main() {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
@ -103,13 +102,13 @@ fn main() {
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
uint idx = gl_GlobalInvocationID.x;
if (pc.enable) {
data.data[idx] *= pc.multiple;
data.data[idx] += uint(pc.addend);
data[idx] *= pc.multiple;
data[idx] += uint(pc.addend);
}
}
",
@ -152,18 +151,20 @@ fn main() {
)
.unwrap();
// The `vulkano_shaders::shaders!` macro generates a struct with the correct representation of the push constants struct specified in the shader.
// Here we create an instance of the generated struct.
let push_constants = cs::ty::PushConstantData {
// The `vulkano_shaders::shader!` macro generates a struct with the correct representation of
// the push constants struct specified in the shader. Here we create an instance of the
// generated struct.
let push_constants = cs::PushConstantData {
multiple: 1,
addend: 1.0,
enable: 1,
};
// For a compute pipeline, push constants are passed to the `dispatch` method.
// For a graphics pipeline, push constants are passed to the `draw` and `draw_indexed` methods.
// Note that there is no type safety for the push constants argument.
// So be careful to only pass an instance of the struct generated by the `vulkano_shaders::shaders!` macro.
// For a compute pipeline, push constants are passed to the `dispatch` method. For a graphics
// pipeline, push constants are passed to the `draw` and `draw_indexed` methods.
//
// Note that there is no type safety for the push constant data, so be careful to only pass an
// instance of the struct generated by the `vulkano_shaders::shader!` macro.
let mut builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),


@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::{io::Cursor, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -59,7 +58,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -98,7 +96,7 @@ fn main() {
PhysicalDeviceType::Other => 4,
_ => 5,
})
.expect("No suitable physical device found");
.expect("no suitable physical device found");
println!(
"Using device: {} (type: {:?})",
@ -155,8 +153,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -315,7 +313,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -331,7 +329,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -391,7 +389,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -400,7 +398,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,
@ -428,7 +426,7 @@ fn window_size_dependent_setup(
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -437,14 +435,15 @@ layout(location = 0) out vec2 tex_coords;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
tex_coords = position + vec2(0.5);
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 tex_coords;
@ -454,6 +453,7 @@ layout(set = 0, binding = 0) uniform sampler2D tex;
void main() {
f_color = texture(tex, tex_coords);
}"
}
",
}
}


@ -6,23 +6,24 @@
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//
// This example demonstrates one way of preparing data structures and loading
// SPIRV shaders from external source (file system).
//
// Note that you will need to do all correctness checking by yourself.
//
// vert.glsl and frag.glsl must be built by yourself.
// One way of building them is to build Khronos' glslang and use
// glslangValidator tool:
// $ glslangValidator vert.glsl -V -S vert -o vert.spv
// $ glslangValidator frag.glsl -V -S frag -o frag.spv
// Vulkano uses glslangValidator to build your shaders internally.
use bytemuck::{Pod, Zeroable};
// This example demonstrates one way of preparing data structures and loading SPIR-V shaders
// from an external source (the file system).
//
// Note that you will need to do all correctness checking yourself.
//
// `vert.glsl` and `frag.glsl` must be built by you. One way of building them is to use `shaderc`:
//
// ```bash
// glslc -fshader-stage=vert vert.glsl -o vert.spv
// glslc -fshader-stage=frag frag.glsl -o frag.spv
// ```
//
// Vulkano uses shaderc to build your shaders internally.
use std::{fs::File, io::Read, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderPassBeginInfo, SubpassContents,
@ -66,7 +67,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -109,7 +109,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -177,10 +177,13 @@ fn main() {
.unwrap();
let vs = {
let mut f = File::open("src/bin/runtime-shader/vert.spv")
.expect("Can't find file src/bin/runtime-shader/vert.spv This example needs to be run from the root of the example crate.");
let mut f = File::open("src/bin/runtime-shader/vert.spv").expect(
"can't find file `src/bin/runtime-shader/vert.spv`, this example needs to be run from \
the root of the example crate",
);
let mut v = vec![];
f.read_to_end(&mut v).unwrap();
// Create a `ShaderModule` on a device the same way `Shader::load` does it.
// NOTE: You will have to verify correctness of the data by yourself!
unsafe { ShaderModule::from_bytes(device.clone(), &v) }.unwrap()
@ -188,9 +191,10 @@ fn main() {
let fs = {
let mut f = File::open("src/bin/runtime-shader/frag.spv")
.expect("Can't find file src/bin/runtime-shader/frag.spv");
.expect("can't find file `src/bin/runtime-shader/frag.spv`");
let mut v = vec![];
f.read_to_end(&mut v).unwrap();
unsafe { ShaderModule::from_bytes(device.clone(), &v) }.unwrap()
};
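// As the note above says, you have to verify the correctness of the data yourself;
// `ShaderModule::from_bytes` will not do it for you. A minimal sanity check (nothing more than
// the SPIR-V magic number and word alignment; proper validation would use a tool such as
// spirv-val), with an illustrative function name:
#[allow(dead_code)]
fn looks_like_spirv(bytes: &[u8]) -> bool {
    const MAGIC: u32 = 0x0723_0203;

    if bytes.len() < 4 || bytes.len() % 4 != 0 {
        return false;
    }

    // SPIR-V can be stored in either endianness; the magic number tells us which one.
    let first_word = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]);

    first_word == MAGIC || first_word.swap_bytes() == MAGIC
}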
@ -213,8 +217,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
pub struct Vertex {
#[format(R32G32_SFLOAT)]
pub position: [f32; 2],
@ -249,6 +253,7 @@ fn main() {
// NOTE: We don't create any descriptor sets in this example, but you should
// note that passing wrong types, providing sets at wrong indexes will cause
// descriptor set builder to return Err!
// TODO: Outdated ^
let mut viewport = Viewport {
origin: [0.0, 0.0],
@ -290,7 +295,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -306,7 +311,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -360,7 +365,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -369,7 +374,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,


@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::{io::Cursor, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -59,8 +58,8 @@ use winit::{
};
fn main() {
// The start of this example is exactly the same as `triangle`. You should read the
// `triangle` example if you haven't done so yet.
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
@ -68,7 +67,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -170,8 +168,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -356,7 +354,7 @@ fn main() {
.descriptor_binding_requirements(),
);
// Set 0, Binding 0
// Set 0, Binding 0.
let binding = layout_create_infos[0].bindings.get_mut(&0).unwrap();
binding.variable_descriptor_count = true;
binding.descriptor_count = 2;
@ -457,7 +455,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -473,7 +471,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -533,7 +531,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -542,7 +540,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,
@ -572,7 +570,7 @@ mod vs {
ty: "vertex",
vulkan_version: "1.2",
spirv_version: "1.5",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -586,7 +584,8 @@ void main() {
gl_Position = vec4(position, 0.0, 1.0);
out_tex_i = tex_i;
out_coords = coords;
}"
}
",
}
}
@ -595,7 +594,7 @@ mod fs {
ty: "fragment",
vulkan_version: "1.2",
spirv_version: "1.5",
src: "
src: r"
#version 450
#extension GL_EXT_nonuniform_qualifier : enable
@ -609,6 +608,7 @@ layout(set = 0, binding = 0) uniform sampler2D tex[];
void main() {
f_color = texture(nonuniformEXT(tex[tex_i]), coords);
}"
}
",
}
}


@ -8,7 +8,8 @@
// according to those terms.
// This example is a copy of `basic-compute-shaders.rs`, but initializes half of the input buffer
// and then we use `copy_buffer_dimensions` to copy the first half of the input buffer to the second half.
// and then we use the `copy_buffer` command to copy the first half of the input buffer to the
// second half.
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
@ -35,7 +36,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -69,7 +69,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -91,23 +91,24 @@ fn main() {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
uint idx = gl_GlobalInvocationID.x;
data.data[idx] *= 12;
data[idx] *= 12;
}
"
",
}
}
let shader = cs::load(device.clone()).unwrap();
ComputePipeline::new(
device.clone(),
shader.entry_point("main").unwrap(),
@ -123,10 +124,7 @@ fn main() {
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let data_buffer = {
// we intitialize half of the array and leave the other half to 0, we will use copy later to fill it
let data_iter = (0..65536u32).map(|n| if n < 65536 / 2 { n } else { 0 });
Buffer::from_iter(
let data_buffer = Buffer::from_iter(
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::STORAGE_BUFFER
@ -134,10 +132,11 @@ fn main() {
| BufferUsage::TRANSFER_DST,
..Default::default()
},
data_iter,
// We initialize half of the array and leave the other half at 0; we will use the copy
// command later to fill it.
(0..65536u32).map(|n| if n < 65536 / 2 { n } else { 0 }),
)
.unwrap()
};
.unwrap();
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
@ -154,7 +153,8 @@ fn main() {
)
.unwrap();
builder
// copy from the first half to the second half (inside the same buffer) before we run the computation
// Copy from the first half to the second half (inside the same buffer) before we run the
// computation.
.copy_buffer(CopyBufferInfoTyped {
regions: [BufferCopy {
src_offset: 0,
@ -187,9 +187,9 @@ fn main() {
let data_buffer_content = data_buffer.read().unwrap();
// here we have the same data in the two halfs of the buffer
// Here we have the same data in the two halves of the buffer.
for n in 0..65536 / 2 {
// the two halfs should have the same data
// The two halves should have the same data.
assert_eq!(data_buffer_content[n as usize], n * 12);
assert_eq!(data_buffer_content[n as usize + 65536 / 2], n * 12);
}


@ -7,9 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// This example demonstrates how to use the standard and relative include directives within
// shader source code. The boilerplate is taken from the "basic-compute-shader.rs" example, where
// most of the boilerplate is explained.
// This example demonstrates how to use the standard and relative include directives within shader
// source code. The boilerplate is taken from the "basic-compute-shader.rs" example, where most of
// the boilerplate is explained.
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
@ -35,7 +35,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -69,7 +68,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -93,26 +92,27 @@ fn main() {
// We declare what directories to search for when using the `#include <...>`
// syntax. Specified directories have descending priorities based on their order.
include: ["src/bin/shader-include/standard-shaders"],
src: "
src: r#"
#version 450
// Substitutes this line with the contents of the file `common.glsl` found in one of the standard
// `include` directories specified above.
// Note, that relative inclusion (`#include \"...\"`), although it falls back to standard
// inclusion, should not be used for **embedded** shader source, as it may be misleading and/or
// confusing.
// Substitutes this line with the contents of the file `common.glsl` found in
// one of the standard `include` directories specified above.
// Note that relative inclusion (`#include "..."`), although it falls back to
// standard inclusion, should not be used for **embedded** shader source, as it
// may be misleading and/or confusing.
#include <common.glsl>
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
uint idx = gl_GlobalInvocationID.x;
data.data[idx] = multiply_by_12(data.data[idx]);
data[idx] = multiply_by_12(data[idx]);
}
"
"#,
}
}
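// For completeness, the relative form also compiles for embedded source, where it simply falls
// back to the standard include directories; the example's own advice above is to prefer
// `#include <...>` in that case. A sketch reusing the same `common.glsl` (only the module name
// is made up):
#[allow(dead_code)]
mod cs_relative {
    vulkano_shaders::shader! {
        ty: "compute",
        include: ["src/bin/shader-include/standard-shaders"],
        src: r#"
            #version 450

            // For embedded source this resolves exactly like `#include <common.glsl>`.
            #include "common.glsl"

            layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;

            layout(set = 0, binding = 0) buffer Data {
                uint data[];
            };

            void main() {
                uint idx = gl_GlobalInvocationID.x;
                data[idx] = multiply_by_12(data[idx]);
            }
        "#,
    }
}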
let shader = cs::load(device.clone()).unwrap();


@ -7,38 +7,26 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// This example demonstrates how to put derives onto generated Rust structs from
// the Shader types through the "types-meta" options of
// `shader!` macro.
// This example demonstrates how to put derives onto Rust structs generated from the shader types
// through the `custom_derives` option of the `shader!` macro.
// Vulkano Shader macro is capable to generate Rust structs representing each
// type found in the shader source. These structs appear in the `ty` module
// generated in the same module where the macro was called.
// The `shader!` macro is capable of generating Rust structs representing each type found in the
// shader source. These structs are generated in the same module where the macro was called.
//
// By default each type has only `Clone` and `Copy` implementations. For
// ergonomic purposes developer may want to implement more traits on top of each
// type. For example "standard" traits such as `Default` or `Debug`.
// By default each type only has `Clone` and `Copy` derives. For ergonomic purposes you may want to
// add more derives for each type, for example built-in derive macros such as `Default` or `Debug`.
//
// One way to do so is implementing them manually, but it would be hard to do,
// and complicates code maintenances.
//
// Another way is to specify a macro option to automatically put derives and
// blanket impls onto each generated type by the Macro itself.
//
// The macro is capable to implement standard trait derives in smart way ignoring
// `_dummyX` fields whenever these fields make no sense. And in addition to that
// developer can also specify derives of traits from external modules/crates
// whenever such traits provide custom derive feature.
// The only way we can add derive macros to these generated types is if the `shader!` macro
// generates the derive attribute with the wanted derives, hence there's a macro option for it.
use ron::{
from_str,
ser::{to_string_pretty, PrettyConfig},
};
use std::fmt::{Debug, Display, Error, Formatter};
use ron::ser::PrettyConfig;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display, Error as FmtError, Formatter};
use vulkano::padded::Padded;
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
struct Foo {
@ -61,62 +49,43 @@ vulkano_shaders::shader! {
void main() {}
",
types_meta: {
use serde::{Deserialize, Serialize};
#[derive(Clone, Copy, PartialEq, Debug, Default, Serialize, Deserialize)]
impl Eq
}
custom_derives: [Clone, Copy, Debug, Default, PartialEq, Serialize, Deserialize],
}
// In the example above the macro generated `Clone`, `Copy`, `PartialEq`,
// `Debug` and `Default` implementations for each declared
// type(`PushConstantData`, `Foo` and `Bar`) in the shader, and applied
// `impl Eq` for each of them too. And it also applied derives of
// `Serialize` and `Deserialize` traits from Serde crate, but it didn't apply
// these things to `Bars` since the `Bars` type does not have size known in
// compile time.
//
// The macro also didn't generate `Display` implementation since we didn't
// specify it. As such we still can implement this trait manually for some
// selected types.
// In the example above the macro generates `Clone`, `Copy`, `Debug`, `Default` and `PartialEq`
// derives for each declared type (`PushConstantData`, `Foo` and `Bar`) in the shader, and it also
// applies derives of the `Serialize` and `Deserialize` traits from serde. However, it doesn't
// apply any of these to `Bars` since that type does not have a size known at compile time.
impl Display for crate::ty::Foo {
fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), Error> {
// Some traits are not meant to be derived, such as `Display`, but we can still implement them
// manually.
impl Display for Foo {
fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), FmtError> {
Debug::fmt(self, formatter)
}
}
fn main() {
use crate::ty::*;
// Prints "Foo { x: 0.0, z: [100.0, 200.0, 300.0] }" skipping "_dummyX" fields.
// Prints "Foo { x: 0.0, z: [100.0, 200.0, 300.0] }".
println!(
"{}",
"{:?}",
Foo {
z: [100.0, 200.0, 300.0],
..Default::default()
}
},
);
let mut bar = Bar {
y: [5.1, 6.2],
// Fills all fields with zeroes including "_dummyX" fields, so we don't
// have to maintain them manually anymore.
// The `Padded` wrapper here is padding the following field, `foo`.
y: Padded([5.1, 6.2]),
// Fills all fields with zeroes.
..Default::default()
};
// The data inside "_dummyX" has no use, but we still can fill it with
// something different from zeroes.
bar._dummy0 = [5; 8];
// Objects are equal since "_dummyX" fields ignoring during comparison
assert_eq!(
Bar {
y: [5.1, 6.2],
// `Padded<T, N>` implements `From<T>`, so you can construct it this way as well.
y: [5.1, 6.2].into(),
..Default::default()
},
bar,
@ -124,16 +93,17 @@ fn main() {
assert_ne!(Bar::default(), bar);
bar.foo.x = 125.0;
// `Padded` dereferences into the wrapped type, so we can easily access the underlying data.
*bar.foo.x = 125.0;
// Since we put `Serialize` and `Deserialize` traits to derives list we can
// serialize and deserialize Shader data
// Since we added the `Serialize` and `Deserialize` traits to the derives list, we can serialize and
// deserialize shader data.
let serialized = to_string_pretty(&bar, PrettyConfig::default()).unwrap();
let serialized = ron::ser::to_string_pretty(&bar, PrettyConfig::default()).unwrap();
println!("Serialized Bar: {serialized}");
let deserialized = from_str::<Bar>(&serialized).unwrap();
let deserialized = ron::from_str::<Bar>(&serialized).unwrap();
assert_eq!(deserialized.foo.x, 125.0);
assert_eq!(*deserialized.foo.x, 125.0);
}
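// A small standalone sketch of `Padded` itself (separate from the example above; the numbers
// and the function name are illustrative only): it stores a value followed by `N` padding
// bytes and dereferences to the wrapped value.
#[allow(dead_code)]
fn padded_sketch() {
    // Pad a `[f32; 3]` with 4 trailing bytes, e.g. to give it the 16-byte size that a `vec3`
    // field typically occupies in std140-like layouts.
    let mut padded: Padded<[f32; 3], 4> = Padded([1.0, 2.0, 3.0]);

    assert_eq!(*padded, [1.0, 2.0, 3.0]); // `Deref` exposes the wrapped array...
    (*padded)[0] = 10.0; // ...and `DerefMut` lets us modify it in place.

    let unwrapped: [f32; 3] = *padded;
    assert_eq!(unwrapped[0], 10.0);
}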


@ -7,25 +7,23 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// This example demonstrates how to compile several shaders together using vulkano-shaders macro,
// such that the macro generates unique Shader types per each compiled shader, but generates common
// shareable set of Rust structs representing corresponding structs in the source glsl code.
// This example demonstrates how to compile several shaders together using the `shader!` macro,
// such that the macro doesn't generate unique shader types for each compiled shader, but generates
// a common shareable set of Rust structs for the corresponding structs in the shaders.
//
// Normally, each vulkano-shaders macro invocation among other things generates a `ty` submodule
// containing all Rust types per each "struct" declaration of glsl code. Using this submodule
// the user can organize type-safe interoperability between Rust code and the shader interface
// input/output data tied to these structs. However, if the user compiles several shaders in
// independent Rust modules, each of these modules would contain independent `ty` submodule with
// each own set of Rust types. So, even if both shaders contain the same(or partially intersecting)
// glsl structs they will be duplicated in each generated `ty` submodule and treated by Rust as
// independent types. As such it would be tricky to organize interoperability between shader
// interfaces in Rust.
// Normally, each `shader!` macro invocation among other things generates all Rust types for each
// `struct` declaration of the GLSL code. Using these the user can organize type-safe
// interoperability between Rust code and the shader input/output interface tied to these structs.
// However, if the user compiles several shaders in independent Rust modules, each of these modules
// would contain an independent set of Rust types. So, even if both shaders contain the same (or
// partially intersecting) GLSL structs they will be duplicated by each macro invocation and
// treated by Rust as independent types. As such it would be tricky to organize interoperability
// between shader interfaces in Rust.
//
// To solve this problem the user can use "shared" generation mode of the macro. In this mode the
// user declares all shaders that possibly share common layout interfaces in a single macro
// invocation. The macro will check that there is no inconsistency between declared glsl structs
// with the same names, and it will put all generated Rust structs for all shaders in just a single
// `ty` submodule.
// invocation. The macro will check that there is no inconsistency between declared GLSL structs
// with the same names, and it will not generate duplicates.
use std::sync::Arc;
use vulkano::{
@ -52,7 +50,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -109,26 +106,26 @@ fn main() {
// their layout interfaces.
//
// The first one just multiplies each value from the input array of ints by the provided
// value in push constants struct. And the second one in turn adds this value instead of
// multiplying.
// value in the push constants struct. And the second one in turn adds this value instead
// of multiplying.
//
// However both shaders declare the GLSL struct `Parameters` for push constants in each
// shader. Since each of the structs has exactly the same interface, they will be
// treated by the macro as "shared".
//
// Also, note that glsl code duplications between shader sources is not necessary too.
// In more complex system the user may want to declare independent glsl file with
// such types, and include it in each shader entry-point files using "#include"
// Also, note that duplicating GLSL code between shader sources isn't necessary either.
// In a more complex system the user may want to declare an independent GLSL file with
// such types, and include it in each shader entry-point file using the `#include`
// directive.
shaders: {
// Generate single unique `SpecializationConstants` struct for all shaders since
// their specialization interfaces are the same. This option is turned off
// by default and the macro by default producing unique
// structs(`MultSpecializationConstants`, `AddSpecializationConstants`)
// Generate a single unique `SpecializationConstants` struct for all shaders, since
// their specialization interfaces are the same. This option is turned off by
// default, in which case the macro produces unique structs
// (`MultSpecializationConstants` and `AddSpecializationConstants` in this case).
shared_constants: true,
mult: {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
@ -140,20 +137,20 @@ fn main() {
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
if (!enabled) {
return;
}
uint idx = gl_GlobalInvocationID.x;
data.data[idx] *= pc.value;
data[idx] *= pc.value;
}
"
",
},
add: {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
@ -165,35 +162,35 @@ fn main() {
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
if (!enabled) {
return;
}
uint idx = gl_GlobalInvocationID.x;
data.data[idx] += pc.value;
}
"
data[idx] += pc.value;
}
",
},
},
}
// The macro will create the following things in this module:
// - `ShaderMult` for the first shader loader/entry-point.
// - `ShaderAdd` for the second shader loader/entry-point.
// `SpecializationConstants` Rust struct for both shader's specialization constants.
// `ty` submodule with `Parameters` Rust struct common for both shaders.
// - `load_mult` for the first shader loader/entry-point.
// - `load_add` for the second shader loader/entry-point.
// - `SpecializationConstants` struct for both shaders' specialization constants.
// - `Parameters` struct common for both shaders.
}
// We introducing generic function responsible for running any of the shaders above with
// provided Push Constants parameter.
// Note that shader's interface `parameter` here is shader-independent.
/// We are introducing a generic function responsible for running any of the shaders above with
/// the provided push constants parameter. Note that the `parameters` argument here is
/// shader-independent.
fn run_shader(
pipeline: Arc<ComputePipeline>,
queue: Arc<Queue>,
data_buffer: Subbuffer<[u32]>,
parameters: shaders::ty::Parameters,
parameters: shaders::Parameters,
command_buffer_allocator: &StandardCommandBufferAllocator,
descriptor_set_allocator: &StandardDescriptorSetAllocator,
) {
@ -238,21 +235,18 @@ fn main() {
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
// Preparing test data array `[0, 1, 2, 3....]`
let data_buffer = {
let data_iter = 0..65536u32;
Buffer::from_iter(
// Prepare test array `[0, 1, 2, 3....]`.
let data_buffer = Buffer::from_iter(
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::STORAGE_BUFFER,
..Default::default()
},
data_iter,
0..65536u32,
)
.unwrap()
};
.unwrap();
// Loading the first shader, and creating a Pipeline for the shader
// Load the first shader, and create a pipeline for the shader.
let mult_pipeline = ComputePipeline::new(
device.clone(),
shaders::load_mult(device.clone())
@ -265,7 +259,7 @@ fn main() {
)
.unwrap();
// Loading the second shader, and creating a Pipeline for the shader
// Load the second shader, and create a pipeline for the shader.
let add_pipeline = ComputePipeline::new(
device.clone(),
shaders::load_add(device)
@ -278,32 +272,32 @@ fn main() {
)
.unwrap();
// Multiply each value by 2
// Multiply each value by 2.
run_shader(
mult_pipeline.clone(),
queue.clone(),
data_buffer.clone(),
shaders::ty::Parameters { value: 2 },
shaders::Parameters { value: 2 },
&command_buffer_allocator,
&descriptor_set_allocator,
);
// Then add 1 to each value
// Then add 1 to each value.
run_shader(
add_pipeline,
queue.clone(),
data_buffer.clone(),
shaders::ty::Parameters { value: 1 },
shaders::Parameters { value: 1 },
&command_buffer_allocator,
&descriptor_set_allocator,
);
// Then multiply each value by 3
// Then multiply each value by 3.
run_shader(
mult_pipeline,
queue,
data_buffer.clone(),
shaders::ty::Parameters { value: 3 },
shaders::Parameters { value: 3 },
&command_buffer_allocator,
&descriptor_set_allocator,
);

View File

@ -7,15 +7,14 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! A minimal particle-sandbox to demonstrate a reasonable use-case for a device-local buffer.
//! We gain significant runtime performance by writing the inital vertex values to the GPU using
//! a staging buffer and then copying the data to a device-local buffer to be accessed solely
//! by the GPU through the compute shader and as a vertex array.
// A minimal particle-sandbox to demonstrate a reasonable use-case for a device-local buffer. We
// gain significant runtime performance by writing the initial vertex values to the GPU using a
// staging buffer and then copying the data to a device-local buffer to be accessed solely by the
// GPU through the compute shader and as a vertex array.
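A rough sketch of that staging pattern with the buffer API used in this commit. The `Vertex` type is the struct defined further down in this file; the usage flags are assumptions, and the allocate-info field that would request device-local memory for the second buffer is deliberately omitted here since its exact name isn't shown in this diff:

use vulkano::{
    buffer::{Buffer, BufferAllocateInfo, BufferUsage, Subbuffer},
    command_buffer::{AutoCommandBufferBuilder, CopyBufferInfo, PrimaryAutoCommandBuffer},
    memory::allocator::StandardMemoryAllocator,
};

fn upload_particles(
    memory_allocator: &StandardMemoryAllocator,
    builder: &mut AutoCommandBufferBuilder<PrimaryAutoCommandBuffer>,
    vertices: impl ExactSizeIterator<Item = Vertex>,
) -> Subbuffer<[Vertex]> {
    // Host-accessible staging buffer that the CPU writes the initial particle data into.
    let staging = Buffer::from_iter(
        memory_allocator,
        BufferAllocateInfo {
            buffer_usage: BufferUsage::TRANSFER_SRC,
            ..Default::default()
        },
        vertices,
    )
    .unwrap();

    // Buffer intended to live in device-local memory, read by the compute and vertex shaders.
    let device_local = Buffer::new_slice::<Vertex>(
        memory_allocator,
        BufferAllocateInfo {
            buffer_usage: BufferUsage::STORAGE_BUFFER
                | BufferUsage::TRANSFER_DST
                | BufferUsage::VERTEX_BUFFER,
            ..Default::default()
        },
        staging.len(),
    )
    .unwrap();

    // Record a one-time copy from the staging buffer into the device-local buffer.
    builder
        .copy_buffer(CopyBufferInfo::buffers(staging, device_local.clone()))
        .unwrap();

    device_local
}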
use bytemuck::{Pod, Zeroable};
use std::{sync::Arc, time::SystemTime};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
CopyBufferInfo, PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -39,8 +38,10 @@ use vulkano::{
GraphicsPipeline, PipelineBindPoint,
},
render_pass::{Framebuffer, FramebufferCreateInfo, Subpass},
swapchain::{PresentMode, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo},
sync::{future::FenceSignalFuture, GpuFuture},
swapchain::{
acquire_next_image, PresentMode, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, future::FenceSignalFuture, GpuFuture},
VulkanLibrary,
};
use vulkano_win::VkSurfaceBuild;
@ -56,15 +57,14 @@ const WINDOW_HEIGHT: u32 = 600;
const PARTICLE_COUNT: usize = 100_000;
fn main() {
// The usual Vulkan initialization.
// Largely the same as example `triangle.rs` until further commentation is provided.
// The usual Vulkan initialization. Largely the same as the `triangle.rs` example until further
// commentary is provided.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
let instance = Instance::new(
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -73,7 +73,8 @@ fn main() {
let event_loop = EventLoop::new();
let surface = WindowBuilder::new()
.with_resizable(false) // For simplicity, we are going to assert that the window size is static
// For simplicity, we are going to assert that the window size is static.
.with_resizable(false)
.with_title("simple particles")
.with_inner_size(winit::dpi::PhysicalSize::new(WINDOW_WIDTH, WINDOW_HEIGHT))
.build_vk_surface(&event_loop, instance.clone())
@ -110,8 +111,9 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
@ -196,7 +198,7 @@ fn main() {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 128, local_size_y = 1, local_size_z = 1) in;
@ -212,14 +214,15 @@ fn main() {
};
// Allow push constants to define the parameters of the computation.
layout (push_constant) uniform PushConstants
{
layout (push_constant) uniform PushConstants {
vec2 attractor;
float attractor_strength;
float delta_time;
} push;
const float maxSpeed = 10.0; // Keep this value in sync with the `maxSpeed` const in the vertex shader.
// Keep this value in sync with the `maxSpeed` const in the vertex shader.
const float maxSpeed = 10.0;
const float minLength = 0.02;
const float friction = -2.0;
@ -263,12 +266,13 @@ fn main() {
",
}
}
// Vertex shader determines color and is run once per particle.
// The vertices will be updates by the compute shader each frame.
// The vertex shader determines color and is run once per particle. The vertices will be
// updated by the compute shader each frame.
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 pos;
@ -276,23 +280,30 @@ fn main() {
layout(location = 0) out vec4 outColor;
const float maxSpeed = 10.0; // Keep this value in sync with the `maxSpeed` const in the compute shader.
// Keep this value in sync with the `maxSpeed` const in the compute shader.
const float maxSpeed = 10.0;
void main() {
gl_Position = vec4(pos, 0.0, 1.0);
gl_PointSize = 1.0;
// Mix colors based on position and velocity.
outColor = mix(0.2*vec4(pos, abs(vel.x)+abs(vel.y), 1.0), vec4(1.0, 0.5, 0.8, 1.0), sqrt(length(vel)/maxSpeed));
}"
outColor = mix(
0.2 * vec4(pos, abs(vel.x) + abs(vel.y), 1.0),
vec4(1.0, 0.5, 0.8, 1.0),
sqrt(length(vel) / maxSpeed)
);
}
",
}
}
// Fragment shader will only need to apply the color forwarded by the vertex shader.
// This is because the color of a particle should be identical over all pixels.
// The fragment shader will only need to apply the color forwarded by the vertex shader,
// because the color of a particle should be identical over all pixels.
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec4 outColor;
@ -302,7 +313,7 @@ fn main() {
void main() {
fragColor = outColor;
}
"
",
}
}
@ -315,8 +326,8 @@ fn main() {
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
pos: [f32; 2],
@ -349,7 +360,8 @@ fn main() {
)
.unwrap();
// Create a buffer in device-local memory with enough space for `PARTICLE_COUNT` number of `Vertex`.
// Create a buffer in device-local memory with enough space for `PARTICLE_COUNT` number of
// `Vertex`.
let device_local_buffer = Buffer::new_slice::<Vertex>(
&memory_allocator,
BufferAllocateInfo {
@ -390,7 +402,7 @@ fn main() {
device_local_buffer
};
// Create compute-pipeline for applying compute shader to vertices.
// Create a compute-pipeline for applying the compute shader to vertices.
let compute_pipeline = vulkano::pipeline::ComputePipeline::new(
device.clone(),
cs.entry_point("main").unwrap(),
@ -398,20 +410,21 @@ fn main() {
None,
|_| {},
)
.expect("Failed to create compute shader");
.expect("failed to create compute shader");
// Create a new descriptor set for binding vertices as a Storage Buffer.
use vulkano::pipeline::Pipeline; // Required to access layout() method of pipeline.
// Create a new descriptor set for binding vertices as a storage buffer.
use vulkano::pipeline::Pipeline; // Required to access the `layout` method of the pipeline.
let descriptor_set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
compute_pipeline
.layout()
.set_layouts()
.get(0) // 0 is the index of the descriptor set.
// 0 is the index of the descriptor set.
.get(0)
.unwrap()
.clone(),
[
// 0 is the binding of the data in this set. We bind the `DeviceLocalBuffer` of vertices here.
// 0 is the binding of the data in this set. We bind the `Buffer` of vertices here.
WriteDescriptorSet::buffer(0, vertex_buffer.clone()),
],
)
@ -428,7 +441,8 @@ fn main() {
let graphics_pipeline = GraphicsPipeline::start()
.vertex_input_state(Vertex::per_vertex())
.vertex_shader(vs.entry_point("main").unwrap(), ())
.input_assembly_state(InputAssemblyState::new().topology(PrimitiveTopology::PointList)) // Vertices will be rendered as a list of points.
// Vertices will be rendered as a list of points.
.input_assembly_state(InputAssemblyState::new().topology(PrimitiveTopology::PointList))
.viewport_state(ViewportState::viewport_fixed_scissor_irrelevant([viewport]))
.fragment_shader(fs.entry_point("main").unwrap(), ())
.render_pass(Subpass::from(render_pass, 0).unwrap())
@ -462,41 +476,42 @@ fn main() {
last_frame_time = now;
// Create push constants to be passed to the compute shader.
let push_constants = cs::ty::PushConstants {
let push_constants = cs::PushConstants {
attractor: [0.75 * (3. * time).cos(), 0.6 * (0.75 * time).sin()],
attractor_strength: 1.2 * (2. * time).cos(),
delta_time,
};
// Acquire information on the next swapchain target.
let (image_index, suboptimal, acquire_future) =
match vulkano::swapchain::acquire_next_image(
let (image_index, suboptimal, acquire_future) = match acquire_next_image(
swapchain.clone(),
None, /*timeout*/
None, // timeout
) {
Ok(tuple) => tuple,
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
// Since we disallow resizing, assert the swapchain and surface are optimally configured.
// Since we disallow resizing, assert that the swapchain and surface are optimally
// configured.
assert!(
!suboptimal,
"Not handling sub-optimal swapchains in this sample code"
"not handling sub-optimal swapchains in this sample code",
);
// If this image buffer already has a future then attempt to cleanup fence resources.
// Usually the future for this index will have completed by the time we are rendering it again.
// If this image buffer already has a future then attempt to clean up fence
// resources. Usually the future for this index will have completed by the time we
// are rendering it again.
if let Some(image_fence) = &mut fences[image_index as usize] {
image_fence.cleanup_finished()
}
// If the previous image has a fence then use it for synchronization, else create a new one.
// If the previous image has a fence then use it for synchronization, else create
// a new one.
let previous_future = match fences[previous_fence_index as usize].clone() {
// Ensure current frame is synchronized with previous.
Some(fence) => fence.boxed(),
// Create a new future to guarantee synchronization with the (fake) previous frame.
None => vulkano::sync::now(device.clone()).boxed(),
None => sync::now(device.clone()).boxed(),
};
let mut builder = AutoCommandBufferBuilder::primary(
@ -553,7 +568,7 @@ fn main() {
Ok(future) => Some(Arc::new(future)),
// Unknown failure.
Err(e) => panic!("Failed to flush future: {e:?}"),
Err(e) => panic!("failed to flush future: {e}"),
};
previous_fence_index = image_index;
}

View File

@ -7,7 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// TODO: Give a paragraph about what specialization are and what problems they solve
// TODO: Give a paragraph about what specialization constants are and what problems they solve.
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
@ -33,7 +33,6 @@ fn main() {
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -65,9 +64,9 @@ fn main() {
.unwrap();
println!(
"Using device: {} (type: {:?})",
"using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -87,27 +86,27 @@ fn main() {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
src: r"
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(constant_id = 0) const int multiple = 64;
layout(constant_id = 1) const float addend = 64;
layout(constant_id = 2) const bool enable = true;
const vec2 foo = vec2(0, 0); // TODO: How do I hit Instruction::SpecConstantComposite
layout(set = 0, binding = 0) buffer Data {
uint data[];
} data;
};
void main() {
uint idx = gl_GlobalInvocationID.x;
if (enable) {
data.data[idx] *= multiple;
data.data[idx] += uint(addend);
data[idx] *= multiple;
data[idx] += uint(addend);
}
}
"
",
}
}

View File

@ -56,8 +56,8 @@ use winit::{
};
fn main() {
// The start of this example is exactly the same as `triangle`. You should read the
// `triangle` example if you haven't done so yet.
// The start of this example is exactly the same as `triangle`. You should read the `triangle`
// example if you haven't done so yet.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
@ -65,7 +65,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -264,7 +263,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -303,7 +302,7 @@ fn main() {
);
let scale = Matrix4::from_scale(0.01);
let uniform_data = vs::ty::Data {
let uniform_data = vs::Data {
world: Matrix4::from(rotation).into(),
view: (view * scale).into(),
proj: proj.into(),
@ -330,7 +329,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -393,7 +392,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -403,7 +402,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
memory_allocator: &StandardMemoryAllocator,
vs: &ShaderModule,
@ -433,9 +432,9 @@ fn window_size_dependent_setup(
})
.collect::<Vec<_>>();
// In the triangle example we use a dynamic viewport, as its a simple example.
// However in the teapot example, we recreate the pipelines with a hardcoded viewport instead.
// This allows the driver to optimize things, at the cost of slower window resizes.
// In the triangle example we use a dynamic viewport, as it's a simple example. However in the
// teapot example, we recreate the pipelines with a hardcoded viewport instead. This allows the
// driver to optimize things, at the cost of slower window resizes.
// https://computergraphics.stackexchange.com/questions/5742/vulkan-best-way-of-updating-pipeline-viewport
let pipeline = GraphicsPipeline::start()
.vertex_input_state([Position::per_vertex(), Normal::per_vertex()])
@ -467,6 +466,6 @@ mod vs {
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
path: "src/bin/teapot/frag.glsl"
path: "src/bin/teapot/frag.glsl",
}
}

View File

@ -8,20 +8,22 @@
// according to those terms.
// Some relevant documentation:
// * Tessellation overview https://www.khronos.org/opengl/wiki/Tessellation
// * Tessellation Control Shader https://www.khronos.org/opengl/wiki/Tessellation_Control_Shader
// * Tessellation Evaluation Shader https://www.khronos.org/opengl/wiki/Tessellation_Evaluation_Shader
// * Tessellation real-world usage 1 http://ogldev.atspace.co.uk/www/tutorial30/tutorial30.html
// * Tessellation real-world usage 2 https://prideout.net/blog/?p=48
//
// - Tessellation overview https://www.khronos.org/opengl/wiki/Tessellation
// - Tessellation Control Shader https://www.khronos.org/opengl/wiki/Tessellation_Control_Shader
// - Tessellation Evaluation Shader https://www.khronos.org/opengl/wiki/Tessellation_Evaluation_Shader
// - Tessellation real-world usage 1 http://ogldev.atspace.co.uk/www/tutorial30/tutorial30.html
// - Tessellation real-world usage 2 https://prideout.net/blog/?p=48
// Notable elements of this example:
// * tessellation control shader and a tessellation evaluation shader
// * tessellation_shaders(..), patch_list(3) and polygon_mode_line() are called on the pipeline builder
//
// - Usage of a tessellation control shader and a tessellation evaluation shader.
// - `tessellation_shaders` and `tessellation_state` are called on the pipeline builder.
// - The use of `PrimitiveTopology::PatchList`.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderPassBeginInfo, SubpassContents,
@ -61,7 +63,7 @@ use winit::{
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -69,64 +71,66 @@ mod vs {
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"
",
}
}
mod tcs {
vulkano_shaders::shader! {
ty: "tess_ctrl",
src: "
src: r"
#version 450
layout (vertices = 3) out; // a value of 3 means a patch consists of a single triangle
// A value of 3 means a patch consists of a single triangle.
layout(vertices = 3) out;
void main(void)
{
// save the position of the patch, so the tes can access it
// We could define our own output variables for this,
// but gl_out is handily provided.
void main(void) {
// Save the position of the patch, so the TES can access it. We could define our
// own output variables for this, but `gl_out` is handily provided.
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
gl_TessLevelInner[0] = 10; // many triangles are generated in the center
gl_TessLevelOuter[0] = 1; // no triangles are generated for this edge
gl_TessLevelOuter[1] = 10; // many triangles are generated for this edge
gl_TessLevelOuter[2] = 10; // many triangles are generated for this edge
// gl_TessLevelInner[1] = only used when tes uses layout(quads)
// gl_TessLevelOuter[3] = only used when tes uses layout(quads)
// Many triangles are generated in the center.
gl_TessLevelInner[0] = 10;
// No triangles are generated for this edge.
gl_TessLevelOuter[0] = 1;
// Many triangles are generated for this edge.
gl_TessLevelOuter[1] = 10;
// Many triangles are generated for this edge.
gl_TessLevelOuter[2] = 10;
// These are only used when TES uses `layout(quads)`.
// gl_TessLevelInner[1] = ...;
// gl_TessLevelOuter[3] = ...;
}
"
",
}
}
// PG
// There is a stage in between tcs and tes called Primitive Generation (PG)
// Shaders cannot be defined for it.
// It takes gl_TessLevelInner and gl_TessLevelOuter and uses them to generate positions within
// the patch and pass them to tes via gl_TessCoord.
// There is a stage in between TCS and TES called Primitive Generation (PG). Shaders cannot be
// defined for it. It takes `gl_TessLevelInner` and `gl_TessLevelOuter` and uses them to generate
// positions within the patch and pass them to TES via `gl_TessCoord`.
//
// When tes uses layout(triangles) then gl_TessCoord is in barrycentric coordinates.
// if layout(quads) is used then gl_TessCoord is in cartesian coordinates.
// Barrycentric coordinates are of the form (x, y, z) where x + y + z = 1
// and the values x, y and z represent the distance from a vertex of the triangle.
// When TES uses `layout(triangles)` then `gl_TessCoord` is in Barycentric coordinates. If
// `layout(quads)` is used then `gl_TessCoord` is in Cartesian coordinates. Barycentric coordinates
// are of the form (x, y, z) where x + y + z = 1 and the values x, y and z represent the distance
// from a vertex of the triangle.
// https://mathworld.wolfram.com/BarycentricCoordinates.html
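A tiny plain-Rust sketch of that conversion, mirroring what the TES below does for each generated vertex; the function and variable names are illustrative:

// Interpolate a point inside a triangle from barycentric coordinates (x, y, z),
// where x + y + z = 1, exactly as the tessellation evaluation shader does below.
fn barycentric_to_cartesian(bary: [f32; 3], verts: [[f32; 2]; 3]) -> [f32; 2] {
    [
        bary[0] * verts[0][0] + bary[1] * verts[1][0] + bary[2] * verts[2][0],
        bary[0] * verts[0][1] + bary[1] * verts[1][1] + bary[2] * verts[2][1],
    ]
}

fn main() {
    let triangle = [[-0.5, -0.5], [0.5, -0.5], [0.0, 0.5]];
    // The centroid has barycentric coordinates (1/3, 1/3, 1/3).
    let centroid = barycentric_to_cartesian([1.0 / 3.0; 3], triangle);
    assert!(centroid[0].abs() < 1e-6);
    assert!((centroid[1] + 1.0 / 6.0).abs() < 1e-6);
}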
mod tes {
vulkano_shaders::shader! {
ty: "tess_eval",
src: "
src: r"
#version 450
layout(triangles, equal_spacing, cw) in;
void main(void)
{
// retrieve the vertex positions set by the tcs
void main(void) {
// Retrieve the vertex positions set by the TCS.
vec4 vert_x = gl_in[0].gl_Position;
vec4 vert_y = gl_in[1].gl_Position;
vec4 vert_z = gl_in[2].gl_Position;
// convert gl_TessCoord from barycentric coordinates to cartesian coordinates
// Convert `gl_TessCoord` from Barycentric coordinates to Cartesian coordinates.
gl_Position = vec4(
gl_TessCoord.x * vert_x.x + gl_TessCoord.y * vert_y.x + gl_TessCoord.z * vert_z.x,
gl_TessCoord.x * vert_x.y + gl_TessCoord.y * vert_y.y + gl_TessCoord.z * vert_z.y,
@ -134,14 +138,14 @@ mod tes {
1.0
);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -149,7 +153,7 @@ mod fs {
void main() {
f_color = vec4(1.0, 1.0, 1.0, 1.0);
}
"
",
}
}
@ -160,7 +164,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -209,7 +212,7 @@ fn main() {
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
physical_device.properties().device_type,
);
let (device, mut queues) = Device::new(
@ -262,7 +265,7 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct Vertex {
#[format(R32G32_SFLOAT)]
@ -396,7 +399,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -412,7 +415,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -466,7 +469,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -475,7 +478,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,

View File

@ -7,10 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use std::{io::Cursor, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
PrimaryCommandBufferAbstract, RenderPassBeginInfo, SubpassContents,
@ -66,7 +65,6 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -161,8 +159,8 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -239,11 +237,14 @@ fn main() {
image_data
})
.collect();
// Replace with your actual image array dimensions.
let dimensions = ImageDimensions::Dim2d {
width: 128,
height: 128,
array_layers: 3,
}; // Replace with your actual image array dimensions
};
let image = ImmutableImage::from_iter(
&memory_allocator,
image_array_data,
@ -253,6 +254,7 @@ fn main() {
&mut uploads,
)
.unwrap();
ImageView::new_default(image).unwrap()
};
@ -324,7 +326,7 @@ fn main() {
}) {
Ok(r) => r,
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
@ -340,7 +342,7 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
if suboptimal {
@ -400,7 +402,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -409,7 +411,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,
@ -437,7 +439,7 @@ fn window_size_dependent_setup(
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -451,14 +453,15 @@ void main() {
gl_Position = vec4(position, 0.0, 1.0);
tex_coords = vec2(x[gl_VertexIndex], y[gl_VertexIndex]);
layer = gl_InstanceIndex;
}"
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) in vec2 tex_coords;
@ -469,6 +472,7 @@ layout(set = 0, binding = 0) uniform sampler2DArray tex;
void main() {
f_color = texture(tex, vec3(tex_coords, layer));
}"
}
",
}
}

View File

@ -12,19 +12,18 @@
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming
// and that you want to learn Vulkan. This means that for example it won't go into details about
// what a vertex or a shader is.
// This example assumes that you are already more or less familiar with graphics programming and
// that you want to learn Vulkan. This means that for example it won't go into details about what a
// vertex or a shader is.
//
// This version of the triangle example is written using dynamic rendering instead of render pass
// and framebuffer objects. If your device does not support Vulkan 1.3 or the
// `khr_dynamic_rendering` extension, or if you want to see how to support older versions, see the
// original triangle example.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderingAttachmentInfo, RenderingInfo,
@ -61,14 +60,15 @@ use winit::{
};
fn main() {
let library = VulkanLibrary::new().unwrap();
// The first step of any Vulkan program is to create an instance.
//
// When we create an instance, we have to pass a list of extensions that we want to enable.
//
// All the window-drawing functionalities are part of non-core extensions that we need
// to enable manually. To do so, we ask the `vulkano_win` crate for the list of extensions
// All the window-drawing functionalities are part of non-core extensions that we need to
// enable manually. To do so, we ask the `vulkano_win` crate for the list of extensions
// required to draw to a window.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
// Now creating the instance.
@ -76,7 +76,8 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
// Enable enumerating devices that use non-conformant Vulkan implementations. (e.g.
// MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -91,16 +92,15 @@ fn main() {
// ever get an error about `build_vk_surface` being undefined in one of your projects, this
// probably means that you forgot to import this trait.
//
// This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit
// window and a cross-platform Vulkan surface that represents the surface of the window.
// This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform
// winit window and a cross-platform Vulkan surface that represents the surface of the window.
let event_loop = EventLoop::new();
let surface = WindowBuilder::new()
.build_vk_surface(&event_loop, instance.clone())
.unwrap();
// Choose device extensions that we're going to use.
// In order to present images to a surface, we need a `Swapchain`, which is provided by the
// `khr_swapchain` extension.
// Choose device extensions that we're going to use. In order to present images to a surface,
// we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
let mut device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
@ -128,11 +128,11 @@ fn main() {
//
// Devices can provide multiple queues to run commands in parallel (for example a draw
// queue and a compute queue), similar to CPU threads. This is something you have to
// have to manage manually in Vulkan. Queues of the same type belong to the same
// queue family.
// manage manually in Vulkan. Queues of the same type belong to the same queue
// family.
//
// Here, we look for a single queue family that is suitable for our purposes. In a
// real-life application, you may want to use a separate dedicated transfer queue to
// real-world application, you may want to use a separate dedicated transfer queue to
// handle data transfers in parallel with graphics operations. You may also need a
// separate queue for compute operations, if your application uses those.
p.queue_family_properties()
@ -140,8 +140,8 @@ fn main() {
.enumerate()
.position(|(i, q)| {
// We select a queue family that supports graphics operations. When drawing to
// a window surface, as we do in this example, we also need to check that queues
// in this queue family are capable of presenting images to the surface.
// a window surface, as we do in this example, we also need to check that
// queues in this queue family are capable of presenting images to the surface.
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
@ -151,13 +151,12 @@ fn main() {
.map(|i| (p, i as u32))
})
// All the physical devices that pass the filters above are suitable for the application.
// However, not every device is equal, some are preferred over others. Now, we assign
// each physical device a score, and pick the device with the
// lowest ("best") score.
// However, not every device is equal, some are preferred over others. Now, we assign each
// physical device a score, and pick the device with the lowest ("best") score.
//
// In this example, we simply select the best-scoring device to use in the application.
// In a real-life setting, you may want to use the best-scoring device only as a
// "default" or "recommended" device, and let the user choose the device themselves.
// In a real-world setting, you may want to use the best-scoring device only as a "default"
// or "recommended" device, and let the user choose the device themself.
.min_by_key(|(p, _)| {
// We assign a lower score to device types that are likely to be faster/better.
match p.properties().device_type {
@ -169,7 +168,7 @@ fn main() {
_ => 5,
}
})
.expect("No suitable physical device found");
.expect("no suitable physical device found");
// Some little debug info.
println!(
@ -189,7 +188,7 @@ fn main() {
// Now initializing the device. This is probably the most important object of Vulkan.
//
// The iterator of created queues is returned by the function alongside the device.
// An iterator of created queues is returned by the function alongside the device.
let (device, mut queues) = Device::new(
// Which physical device to connect to.
physical_device,
@ -223,17 +222,17 @@ fn main() {
)
.unwrap();
// Since we can request multiple queues, the `queues` variable is in fact an iterator. We
// only use one queue in this example, so we just retrieve the first and only element of the
// Since we can request multiple queues, the `queues` variable is in fact an iterator. We only
// use one queue in this example, so we just retrieve the first and only element of the
// iterator.
let queue = queues.next().unwrap();
// Before we can draw on the surface, we have to create what is called a swapchain. Creating
// a swapchain allocates the color buffers that will contain the image that will ultimately
// be visible on the screen. These images are returned alongside the swapchain.
// Before we can draw on the surface, we have to create what is called a swapchain. Creating a
// swapchain allocates the color buffers that will contain the image that will ultimately be
// visible on the screen. These images are returned alongside the swapchain.
let (mut swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only
// pass values that are allowed by the capabilities.
// Querying the capabilities of the surface. When we create the swapchain we can only pass
// values that are allowed by the capabilities.
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
@ -257,17 +256,17 @@ fn main() {
min_image_count: surface_capabilities.min_image_count,
image_format,
// The dimensions of the window, only used to initially setup the swapchain.
//
// NOTE:
// On some drivers the swapchain dimensions are specified by
// `surface_capabilities.current_extent` and the swapchain size must use these
// dimensions.
// These dimensions are always the same as the window dimensions.
// dimensions. These dimensions are always the same as the window dimensions.
//
// However, other drivers don't specify a value, i.e.
// `surface_capabilities.current_extent` is `None`. These drivers will allow
// anything, but the only sensible value is the window
// dimensions.
// anything, but the only sensible value is the window dimensions.
//
// Both of these cases need the swapchain to use the window dimensions, so we just
// use that.
@ -291,11 +290,11 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
// We now create a buffer that will store the shape of our triangle.
// We use #[repr(C)] here to force rustc to not do anything funky with our data, although for this
// particular example, it doesn't actually change the in-memory representation.
// We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here
// to force rustc to use a defined layout for our data, as the default representation has *no
// guarantees*.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
@ -324,24 +323,22 @@ fn main() {
// The next step is to create the shaders.
//
// The raw shader creation API provided by the vulkano library is unsafe for various
// reasons, so The `shader!` macro provides a way to generate a Rust module from GLSL
// source - in the example below, the source is provided as a string input directly to
// the shader, but a path to a source file can be provided as well. Note that the user
// must specify the type of shader (e.g., "vertex," "fragment, etc.") using the `ty`
// option of the macro.
// The raw shader creation API provided by the vulkano library is unsafe for various reasons,
// so the `shader!` macro provides a way to generate a Rust module from GLSL source: in the
// example below, the source is provided as a string input directly to the macro, but a path
// to a source file can be provided as well. Note that the user must specify the type of shader
// (e.g. "vertex", "fragment", etc.) using the `ty` option of the macro.
//
// The module generated by the `shader!` macro includes a `load` function which loads
// the shader using an input logical device. The module also includes type definitions
// for layout structures defined in the shader source, for example, uniforms and push
// constants.
// The items generated by the `shader!` macro include a `load` function which loads the shader
// using an input logical device. The generated items also include type definitions for layout
// structures defined in the shader source, for example uniforms and push constants.
//
// A more detailed overview of what the `shader!` macro generates can be found in the
// `vulkano-shaders` crate docs. You can view them at https://docs.rs/vulkano-shaders/
// vulkano-shaders crate docs. You can view them at https://docs.rs/vulkano-shaders/
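As a short sketch of how the generated `load` functions are typically consumed; the `vs` and `fs` modules are the ones defined just below, and wrapping the calls in a helper function is only for illustration:

use std::sync::Arc;
use vulkano::{device::Device, shader::ShaderModule};

fn load_shaders(device: Arc<Device>) -> (Arc<ShaderModule>, Arc<ShaderModule>) {
    // `load` does no compilation at runtime; it creates a `ShaderModule` from the
    // SPIR-V that the `shader!` macro embedded at build time.
    let vs = vs::load(device.clone()).expect("failed to create shader module");
    let fs = fs::load(device).expect("failed to create shader module");
    (vs, fs)
}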
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -349,14 +346,14 @@ fn main() {
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -364,7 +361,7 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
@ -392,8 +389,8 @@ fn main() {
.vertex_input_state(Vertex::per_vertex())
// The content of the vertex buffer describes a list of triangles.
.input_assembly_state(InputAssemblyState::new())
// A Vulkan shader can in theory contain multiple entry points, so we have to specify
// which one.
// A Vulkan shader can in theory contain multiple entry points, so we have to specify which
// one.
.vertex_shader(vs.entry_point("main").unwrap(), ())
// Use a resizable viewport set to draw over the entire window
.viewport_state(ViewportState::viewport_dynamic_scissor_irrelevant())
@ -403,7 +400,7 @@ fn main() {
.build(device.clone())
.unwrap();
// Dynamic viewports allow us to recreate just the viewport when the window is resized
// Dynamic viewports allow us to recreate just the viewport when the window is resized.
// Otherwise we would have to recreate the whole pipeline.
let mut viewport = Viewport {
origin: [0.0, 0.0],
@ -433,8 +430,8 @@ fn main() {
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not work.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain.
// Here, we remember that we need to do this for the next loop iteration.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain. Here,
// we remember that we need to do this for the next loop iteration.
let mut recreate_swapchain = false;
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
@ -460,14 +457,15 @@ fn main() {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
// It is important to call this function from time to time, otherwise resources will keep
// accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU has
// already processed, and frees the resources that are no longer needed.
// It is important to call this function from time to time, otherwise resources
// will keep accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU
// has already processed, and frees the resources that are no longer needed.
previous_frame_end.as_mut().unwrap().cleanup_finished();
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
// Whenever the window resizes we need to recreate everything dependent on the
// window size. In this example that includes the swapchain, the framebuffers and
// the dynamic state viewport.
if recreate_swapchain {
// Get the new dimensions of the window.
let window = surface.object().unwrap().downcast_ref::<Window>().unwrap();
@ -478,27 +476,30 @@ fn main() {
..swapchain.create_info()
}) {
Ok(r) => r,
// This error tends to happen when the user is manually resizing the window.
// Simply restarting the loop is the easiest way to fix this issue.
// This error tends to happen when the user is manually resizing the
// window. Simply restarting the loop is the easiest way to fix this
// issue.
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
// Now that we have new swapchain images, we must create new image views from
// them as well.
attachment_image_views =
window_size_dependent_setup(&new_images, &mut viewport);
recreate_swapchain = false;
}
// Before we can draw on the output, we have to *acquire* an image from the swapchain. If
// no image is available (which happens if you submit draw commands too quickly), then the
// function will block.
// This operation returns the index of the image that we are allowed to draw upon.
// Before we can draw on the output, we have to *acquire* an image from the
// swapchain. If no image is available (which happens if you submit draw commands
// too quickly), then the function will block. This operation returns the index of
// the image that we are allowed to draw upon.
//
// This function can block if no image is available. The parameter is an optional timeout
// after which the function call will return an error.
// This function can block if no image is available. The parameter is an optional
// timeout after which the function call will return an error.
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
@ -506,25 +507,26 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
// acquire_next_image can be successful, but suboptimal. This means that the swapchain image
// will still work, but it may not display correctly. With some drivers this can be when
// the window resizes, but it may not cause the swapchain to become out of date.
// `acquire_next_image` can be successful, but suboptimal. This means that the
// swapchain image will still work, but it may not display correctly. With some
// drivers this can be when the window resizes, but it may not cause the swapchain
// to become out of date.
if suboptimal {
recreate_swapchain = true;
}
// In order to draw, we have to build a *command buffer*. The command buffer object holds
// the list of commands that are going to be executed.
// In order to draw, we have to build a *command buffer*. The command buffer object
// holds the list of commands that are going to be executed.
//
// Building a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to be
// optimized.
// microseconds), but it is known to be a hot path in the driver and is expected to
// be optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The command
// buffer will only be executable on that given queue family.
// Note that we have to pass a queue family when we create the command buffer. The
// command buffer will only be executable on that given queue family.
let mut builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),
@ -537,20 +539,20 @@ fn main() {
// attachments we are going to use for rendering here, which needs to match
// what was previously specified when creating the pipeline.
.begin_rendering(RenderingInfo {
// As before, we specify one color attachment, but now we specify
// the image view to use as well as how it should be used.
// As before, we specify one color attachment, but now we specify the image
// view to use as well as how it should be used.
color_attachments: vec![Some(RenderingAttachmentInfo {
// `Clear` means that we ask the GPU to clear the content of this
// attachment at the start of rendering.
load_op: LoadOp::Clear,
// `Store` means that we ask the GPU to store the rendered output
// in the attachment image. We could also ask it to discard the result.
// `Store` means that we ask the GPU to store the rendered output in
// the attachment image. We could also ask it to discard the result.
store_op: StoreOp::Store,
// The value to clear the attachment with. Here we clear it with a
// blue color.
// The value to clear the attachment with. Here we clear it with a blue
// color.
//
// Only attachments that have `LoadOp::Clear` are provided with
// clear values, any others should use `None` as the clear value.
// Only attachments that have `LoadOp::Clear` are provided with clear
// values, any others should use `None` as the clear value.
clear_value: Some([0.0, 0.0, 1.0, 1.0].into()),
..RenderingAttachmentInfo::image_view(
// We specify image view corresponding to the currently acquired
@ -561,13 +563,13 @@ fn main() {
..Default::default()
})
.unwrap()
// We are now inside the first subpass of the render pass. We add a draw command.
// We are now inside the first subpass of the render pass.
//
// The last two parameters contain the list of resources to pass to the shaders.
// Since we used an `EmptyPipeline` object, the objects have to be `()`.
// TODO: Document state setting and how it affects subsequent draw commands.
.set_viewport(0, [viewport.clone()])
.bind_pipeline_graphics(pipeline.clone())
.bind_vertex_buffers(0, vertex_buffer.clone())
// We add a draw command.
.draw(vertex_buffer.len() as u32, 1, 0, 0)
.unwrap()
// We leave the render pass.
@ -583,12 +585,14 @@ fn main() {
.join(acquire_future)
.then_execute(queue.clone(), command_buffer)
.unwrap()
// The color output is now expected to contain our triangle. But in order to show it on
// the screen, we have to *present* the image by calling `present`.
// The color output is now expected to contain our triangle. But in order to
// show it on the screen, we have to *present* the image by calling
// `then_swapchain_present`.
//
// This function does not actually present the image immediately. Instead it submits a
// present command at the end of the queue. This means that it will only be presented once
// the GPU has finished executing the command buffer that draws the triangle.
// This function does not actually present the image immediately. Instead it
// submits a present command at the end of the queue. This means that it will
// only be presented once the GPU has finished executing the command buffer
// that draws the triangle.
.then_swapchain_present(
queue.clone(),
SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
@ -604,7 +608,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
println!("Failed to flush future: {e:?}");
println!("failed to flush future: {e}");
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -614,7 +618,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
viewport: &mut Viewport,

View File

@ -12,14 +12,13 @@
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming
// and that you want to learn Vulkan. This means that for example it won't go into details about
// what a vertex or a shader is.
// This example assumes that you are already more or less familiar with graphics programming and
// that you want to learn Vulkan. This means that for example it won't go into details about what a
// vertex or a shader is.
use bytemuck::{Pod, Zeroable};
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferAllocateInfo, BufferUsage},
buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferUsage},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
RenderPassBeginInfo, SubpassContents,
@ -55,14 +54,15 @@ use winit::{
};
fn main() {
let library = VulkanLibrary::new().unwrap();
// The first step of any Vulkan program is to create an instance.
//
// When we create an instance, we have to pass a list of extensions that we want to enable.
//
// All the window-drawing functionalities are part of non-core extensions that we need
// to enable manually. To do so, we ask the `vulkano_win` crate for the list of extensions
// All the window-drawing functionalities are part of non-core extensions that we need to
// enable manually. To do so, we ask the `vulkano_win` crate for the list of extensions
// required to draw to a window.
let library = VulkanLibrary::new().unwrap();
let required_extensions = vulkano_win::required_extensions(&library);
// Now creating the instance.
@ -70,7 +70,8 @@ fn main() {
library,
InstanceCreateInfo {
enabled_extensions: required_extensions,
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
// Enable enumerating devices that use non-conformant Vulkan implementations. (e.g.
// MoltenVK)
enumerate_portability: true,
..Default::default()
},
@ -85,16 +86,15 @@ fn main() {
// ever get an error about `build_vk_surface` being undefined in one of your projects, this
// probably means that you forgot to import this trait.
//
// This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform winit
// window and a cross-platform Vulkan surface that represents the surface of the window.
// This returns a `vulkano::swapchain::Surface` object that contains both a cross-platform
// winit window and a cross-platform Vulkan surface that represents the surface of the window.
let event_loop = EventLoop::new();
let surface = WindowBuilder::new()
.build_vk_surface(&event_loop, instance.clone())
.unwrap();
// Choose device extensions that we're going to use.
// In order to present images to a surface, we need a `Swapchain`, which is provided by the
// `khr_swapchain` extension.
// Choose device extensions that we're going to use. In order to present images to a surface,
// we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
@ -117,11 +117,11 @@ fn main() {
//
// Devices can provide multiple queues to run commands in parallel (for example a draw
// queue and a compute queue), similar to CPU threads. This is something you have to
// have to manage manually in Vulkan. Queues of the same type belong to the same
// queue family.
// manage manually in Vulkan. Queues of the same type belong to the same queue
// family.
//
// Here, we look for a single queue family that is suitable for our purposes. In a
// real-life application, you may want to use a separate dedicated transfer queue to
// real-world application, you may want to use a separate dedicated transfer queue to
// handle data transfers in parallel with graphics operations. You may also need a
// separate queue for compute operations, if your application uses those.
p.queue_family_properties()
@ -129,8 +129,8 @@ fn main() {
.enumerate()
.position(|(i, q)| {
// We select a queue family that supports graphics operations. When drawing to
// a window surface, as we do in this example, we also need to check that queues
// in this queue family are capable of presenting images to the surface.
// a window surface, as we do in this example, we also need to check that
// queues in this queue family are capable of presenting images to the surface.
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
@ -140,13 +140,12 @@ fn main() {
.map(|i| (p, i as u32))
})
// All the physical devices that pass the filters above are suitable for the application.
// However, not every device is equal, some are preferred over others. Now, we assign
// each physical device a score, and pick the device with the
// lowest ("best") score.
// However, not every device is equal; some are preferred over others. Now, we assign each
// physical device a score, and pick the device with the lowest ("best") score.
//
// In this example, we simply select the best-scoring device to use in the application.
// In a real-life setting, you may want to use the best-scoring device only as a
// "default" or "recommended" device, and let the user choose the device themselves.
// In a real-world setting, you may want to use the best-scoring device only as a "default"
// or "recommended" device, and let the user choose the device themself.
.min_by_key(|(p, _)| {
// We assign a lower score to device types that are likely to be faster/better.
match p.properties().device_type {
@ -158,7 +157,7 @@ fn main() {
_ => 5,
}
})
.expect("No suitable physical device found");
.expect("no suitable physical device found");
// Some little debug infos.
println!(
@ -169,7 +168,7 @@ fn main() {
// Now initializing the device. This is probably the most important object of Vulkan.
//
// The iterator of created queues is returned by the function alongside the device.
// An iterator of created queues is returned by the function alongside the device.
let (device, mut queues) = Device::new(
// Which physical device to connect to.
physical_device,
@ -192,17 +191,17 @@ fn main() {
)
.unwrap();
// Since we can request multiple queues, the `queues` variable is in fact an iterator. We
// only use one queue in this example, so we just retrieve the first and only element of the
// Since we can request multiple queues, the `queues` variable is in fact an iterator. We only
// use one queue in this example, so we just retrieve the first and only element of the
// iterator.
let queue = queues.next().unwrap();
// Before we can draw on the surface, we have to create what is called a swapchain. Creating
// a swapchain allocates the color buffers that will contain the image that will ultimately
// be visible on the screen. These images are returned alongside the swapchain.
// Before we can draw on the surface, we have to create what is called a swapchain. Creating a
// swapchain allocates the color buffers that will contain the image that will ultimately be
// visible on the screen. These images are returned alongside the swapchain.
let (mut swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only
// pass values that are allowed by the capabilities.
// Querying the capabilities of the surface. When we create the swapchain we can only pass
// values that are allowed by the capabilities.
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
@ -226,17 +225,17 @@ fn main() {
min_image_count: surface_capabilities.min_image_count,
image_format,
// The dimensions of the window, only used to initially set up the swapchain.
//
// NOTE:
// On some drivers the swapchain dimensions are specified by
// `surface_capabilities.current_extent` and the swapchain size must use these
// dimensions.
// These dimensions are always the same as the window dimensions.
// dimensions. These dimensions are always the same as the window dimensions.
//
// However, other drivers don't specify a value, i.e.
// `surface_capabilities.current_extent` is `None`. These drivers will allow
// anything, but the only sensible value is the window
// dimensions.
// anything, but the only sensible value is the window dimensions.
//
// Both of these cases need the swapchain to use the window dimensions, so we just
// use that.
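The two driver behaviors described above can be collapsed into one expression. A minimal sketch, assuming `surface_capabilities` and `window` are the values bound in the surrounding example and that `current_extent` is exposed as an `Option` (as it is on vulkano's `SurfaceCapabilities`):

// Sketch only: fall back to the window size when the driver does not specify
// an extent itself.
let image_extent: [u32; 2] = surface_capabilities
    .current_extent
    .unwrap_or_else(|| window.inner_size().into());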
@ -260,11 +259,11 @@ fn main() {
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
// We now create a buffer that will store the shape of our triangle.
// We use #[repr(C)] here to force rustc to not do anything funky with our data, although for this
// particular example, it doesn't actually change the in-memory representation.
// We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here
// to force rustc to use a defined layout for our data, as the default representation has *no
// guarantees*.
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
struct Vertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
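As a standalone illustration of why the comment above insists on `#[repr(C)]` (independent of vulkano, nothing here is part of the example): the default `repr(Rust)` lets the compiler reorder fields and gives no layout guarantees, while `#[repr(C)]` fixes declaration order and C-style padding, which is what data read by the GPU needs.

use std::mem::{align_of, size_of};

#[repr(C)]
struct Explicit {
    a: u8,
    b: u32,
    c: u8,
}

struct Unspecified {
    a: u8,
    b: u32,
    c: u8,
}

fn main() {
    // With `#[repr(C)]` this is 12 bytes on targets where `u32` is 4-byte
    // aligned: 1 + 3 padding + 4 + 1 + 3 trailing padding.
    println!("repr(C):    size {}, align {}", size_of::<Explicit>(), align_of::<Explicit>());
    // With the default representation the size is whatever rustc chooses
    // (commonly 8 here, but nothing is guaranteed).
    println!("repr(Rust): size {}, align {}", size_of::<Unspecified>(), align_of::<Unspecified>());
}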
@ -293,24 +292,22 @@ fn main() {
// The next step is to create the shaders.
//
// The raw shader creation API provided by the vulkano library is unsafe for various
// reasons, so The `shader!` macro provides a way to generate a Rust module from GLSL
// source - in the example below, the source is provided as a string input directly to
// the shader, but a path to a source file can be provided as well. Note that the user
// must specify the type of shader (e.g., "vertex," "fragment, etc.") using the `ty`
// option of the macro.
// The raw shader creation API provided by the vulkano library is unsafe for various reasons,
// so the `shader!` macro provides a way to generate a Rust module from GLSL source - in the
// example below, the source is provided as a string input directly to the shader, but a path
// to a source file can be provided as well. Note that the user must specify the type of shader
// (e.g. "vertex", "fragment", etc.) using the `ty` option of the macro.
//
// The module generated by the `shader!` macro includes a `load` function which loads
// the shader using an input logical device. The module also includes type definitions
// for layout structures defined in the shader source, for example, uniforms and push
// constants.
// The items generated by the `shader!` macro include a `load` function which loads the shader
// using an input logical device. The module also includes type definitions for layout
// structures defined in the shader source, for example uniforms and push constants.
//
// A more detailed overview of what the `shader!` macro generates can be found in the
// `vulkano-shaders` crate docs. You can view them at https://docs.rs/vulkano-shaders/
// vulkano-shaders crate docs. You can view them at https://docs.rs/vulkano-shaders/
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
src: r"
#version 450
layout(location = 0) in vec2 position;
@ -318,14 +315,14 @@ fn main() {
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
src: r"
#version 450
layout(location = 0) out vec4 f_color;
@ -333,7 +330,7 @@ fn main() {
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
"
",
}
}
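For orientation, a hedged sketch of how the generated `load` functions are consumed when the pipeline is built later in this example; the `entry_point` lookup assumes the `ShaderModule::entry_point` accessor of this vulkano version:

let vs = vs::load(device.clone()).expect("failed to create shader module");
let fs = fs::load(device.clone()).expect("failed to create shader module");

// Entry points are then looked up by name when building the graphics pipeline.
let vs_entry = vs.entry_point("main").unwrap();
let fs_entry = fs.entry_point("main").unwrap();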
@ -345,27 +342,28 @@ fn main() {
// manually.
// The next step is to create a *render pass*, which is an object that describes where the
// output of the graphics pipeline will go. It describes the layout of the images
// where the colors, depth and/or stencil information will be written.
// output of the graphics pipeline will go. It describes the layout of the images where the
// colors, depth and/or stencil information will be written.
let render_pass = vulkano::single_pass_renderpass!(
device.clone(),
attachments: {
// `color` is a custom name we give to the first and only attachment.
color: {
// `load: Clear` means that we ask the GPU to clear the content of this
// attachment at the start of the drawing.
// `load: Clear` means that we ask the GPU to clear the content of this attachment
// at the start of the drawing.
load: Clear,
// `store: Store` means that we ask the GPU to store the output of the draw
// in the actual image. We could also ask it to discard the result.
// `store: Store` means that we ask the GPU to store the output of the draw in the
// actual image. We could also ask it to discard the result.
store: Store,
// `format: <ty>` indicates the type of the format of the image. This has to
// be one of the types of the `vulkano::format` module (or alternatively one
// of your structs that implements the `FormatDesc` trait). Here we use the
// same format as the swapchain.
// `format: <ty>` indicates the type of the format of the image. This has to be one
// of the types of the `vulkano::format` module (or alternatively one of your
// structs that implements the `FormatDesc` trait). Here we use the same format as
// the swapchain.
format: swapchain.image_format(),
// `samples: 1` means that we ask the GPU to use one sample to determine the value
// of each pixel in the color attachment. We could use a larger value (multisampling)
// for antialiasing. An example of this can be found in msaa-renderpass.rs.
// of each pixel in the color attachment. We could use a larger value
// (multisampling) for antialiasing. An example of this can be found in
// msaa-renderpass.rs.
samples: 1,
}
},
@ -399,7 +397,7 @@ fn main() {
.build(device.clone())
.unwrap();
// Dynamic viewports allow us to recreate just the viewport when the window is resized
// Dynamic viewports allow us to recreate just the viewport when the window is resized.
// Otherwise we would have to recreate the whole pipeline.
let mut viewport = Viewport {
origin: [0.0, 0.0],
@ -429,8 +427,8 @@ fn main() {
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not work.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain.
// Here, we remember that we need to do this for the next loop iteration.
// To continue rendering, we need to recreate the swapchain by creating a new one. Here,
// we remember that we need to do this for the next loop iteration.
let mut recreate_swapchain = false;
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
@ -456,22 +454,23 @@ fn main() {
recreate_swapchain = true;
}
Event::RedrawEventsCleared => {
// Do not draw frame when screen dimensions are zero.
// On Windows, this can occur from minimizing the application.
// Do not draw the frame when the screen dimensions are zero. On Windows, this can
// occur when minimizing the application.
let window = surface.object().unwrap().downcast_ref::<Window>().unwrap();
let dimensions = window.inner_size();
if dimensions.width == 0 || dimensions.height == 0 {
return;
}
// It is important to call this function from time to time, otherwise resources will keep
// accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU has
// already processed, and frees the resources that are no longer needed.
// It is important to call this function from time to time, otherwise resources
// will keep accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU
// has already processed, and frees the resources that are no longer needed.
previous_frame_end.as_mut().unwrap().cleanup_finished();
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
// Whenever the window resizes we need to recreate everything dependent on the
// window size. In this example that includes the swapchain, the framebuffers and
// the dynamic state viewport.
if recreate_swapchain {
// Use the new dimensions of the window.
@ -481,30 +480,33 @@ fn main() {
..swapchain.create_info()
}) {
Ok(r) => r,
// This error tends to happen when the user is manually resizing the window.
// Simply restarting the loop is the easiest way to fix this issue.
// This error tends to happen when the user is manually resizing the
// window. Simply restarting the loop is the easiest way to fix this
// issue.
Err(SwapchainCreationError::ImageExtentNotSupported { .. }) => return,
Err(e) => panic!("Failed to recreate swapchain: {e:?}"),
Err(e) => panic!("failed to recreate swapchain: {e}"),
};
swapchain = new_swapchain;
// Because framebuffers contains an Arc on the old swapchain, we need to
// Because the framebuffers contain a reference to the old swapchain, we need to
// recreate framebuffers as well.
framebuffers = window_size_dependent_setup(
&new_images,
render_pass.clone(),
&mut viewport,
);
recreate_swapchain = false;
}
// Before we can draw on the output, we have to *acquire* an image from the swapchain. If
// no image is available (which happens if you submit draw commands too quickly), then the
// function will block.
// This operation returns the index of the image that we are allowed to draw upon.
// Before we can draw on the output, we have to *acquire* an image from the
// swapchain. If no image is available (which happens if you submit draw commands
// too quickly), then the function will block. This operation returns the index of
// the image that we are allowed to draw upon.
//
// This function can block if no image is available. The parameter is an optional timeout
// after which the function call will return an error.
// This function can block if no image is available. The parameter is an optional
// timeout after which the function call will return an error.
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(swapchain.clone(), None) {
Ok(r) => r,
@ -512,25 +514,26 @@ fn main() {
recreate_swapchain = true;
return;
}
Err(e) => panic!("Failed to acquire next image: {e:?}"),
Err(e) => panic!("failed to acquire next image: {e}"),
};
// acquire_next_image can be successful, but suboptimal. This means that the swapchain image
// will still work, but it may not display correctly. With some drivers this can be when
// the window resizes, but it may not cause the swapchain to become out of date.
// `acquire_next_image` can be successful, but suboptimal. This means that the
// swapchain image will still work, but it may not display correctly. With some
// drivers this can happen when the window resizes, but it may not cause the swapchain
// to become out of date.
if suboptimal {
recreate_swapchain = true;
}
// In order to draw, we have to build a *command buffer*. The command buffer object holds
// the list of commands that are going to be executed.
// In order to draw, we have to build a *command buffer*. The command buffer object
// holds the list of commands that are going to be executed.
//
// Building a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to be
// optimized.
// microseconds), but it is known to be a hot path in the driver and is expected to
// be optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The command
// buffer will only be executable on that given queue family.
// Note that we have to pass a queue family when we create the command buffer. The
// command buffer will only be executable on that given queue family.
let mut builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),
@ -543,12 +546,13 @@ fn main() {
.begin_render_pass(
RenderPassBeginInfo {
// A list of values to clear the attachments with. This list contains
// one item for each attachment in the render pass. In this case,
// there is only one attachment, and we clear it with a blue color.
// one item for each attachment in the render pass. In this case, there
// is only one attachment, and we clear it with a blue color.
//
// Only attachments that have `LoadOp::Clear` are provided with clear
// values, any others should use `ClearValue::None` as the clear value.
clear_values: vec![Some([0.0, 0.0, 1.0, 1.0].into())],
..RenderPassBeginInfo::framebuffer(
framebuffers[image_index as usize].clone(),
)
@ -559,17 +563,17 @@ fn main() {
SubpassContents::Inline,
)
.unwrap()
// We are now inside the first subpass of the render pass. We add a draw command.
// We are now inside the first subpass of the render pass.
//
// The last two parameters contain the list of resources to pass to the shaders.
// Since we used an `EmptyPipeline` object, the objects have to be `()`.
// TODO: Document state setting and how it affects subsequent draw commands.
.set_viewport(0, [viewport.clone()])
.bind_pipeline_graphics(pipeline.clone())
.bind_vertex_buffers(0, vertex_buffer.clone())
// We add a draw command.
.draw(vertex_buffer.len() as u32, 1, 0, 0)
.unwrap()
// We leave the render pass. Note that if we had multiple
// subpasses we could have called `next_subpass` to jump to the next subpass.
// We leave the render pass. Note that if we had multiple subpasses we could
// have called `next_subpass` to jump to the next subpass.
.end_render_pass()
.unwrap();
@ -582,12 +586,14 @@ fn main() {
.join(acquire_future)
.then_execute(queue.clone(), command_buffer)
.unwrap()
// The color output is now expected to contain our triangle. But in order to show it on
// the screen, we have to *present* the image by calling `present`.
// The color output is now expected to contain our triangle. But in order to
// show it on the screen, we have to *present* the image by calling
// `then_swapchain_present`.
//
// This function does not actually present the image immediately. Instead it submits a
// present command at the end of the queue. This means that it will only be presented once
// the GPU has finished executing the command buffer that draws the triangle.
// This function does not actually present the image immediately. Instead it
// submits a present command at the end of the queue. This means that it will
// only be presented once the GPU has finished executing the command buffer
// that draws the triangle.
.then_swapchain_present(
queue.clone(),
SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
@ -603,7 +609,7 @@ fn main() {
previous_frame_end = Some(sync::now(device.clone()).boxed());
}
Err(e) => {
panic!("Failed to flush future: {e:?}");
panic!("failed to flush future: {e}");
// previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
@ -613,7 +619,7 @@ fn main() {
});
}
/// This method is called once during initialization, then again whenever the window is resized
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<SwapchainImage>],
render_pass: Arc<RenderPass>,

View File

@ -7,11 +7,10 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use bytemuck::{Pod, Zeroable};
use vulkano::pipeline::graphics::vertex_input::Vertex;
use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
pub struct Position {
#[format(R32G32B32_SFLOAT)]
position: [f32; 3],
@ -1614,8 +1613,8 @@ pub const POSITIONS: [Position; 531] = [
},
];
#[derive(BufferContents, Vertex)]
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
pub struct Normal {
#[format(R32G32B32_SFLOAT)]
normal: [f32; 3],

View File

@ -2,7 +2,10 @@
name = "vulkano-shaders"
version = "0.32.0"
edition = "2021"
authors = ["Pierre Krieger <pierre.krieger1708@gmail.com>", "The vulkano contributors"]
authors = [
"Pierre Krieger <pierre.krieger1708@gmail.com>",
"The vulkano contributors",
]
repository = "https://github.com/vulkano-rs/vulkano"
description = "Shaders rust code generation macro"
license = "MIT/Apache-2.0"
@ -24,7 +27,5 @@ syn = { version = "1.0", features = ["full", "extra-traits"] }
vulkano = { version = "0.32.0", path = "../vulkano" }
[features]
cgmath = []
nalgebra = []
shaderc-build-from-source = ["shaderc/build-from-source"]
shaderc-debug = []

View File

@ -7,27 +7,31 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::{entry_point, read_file_to_string, structs, LinAlgType, RegisteredType, TypesMeta};
use ahash::HashMap;
use crate::{
entry_point,
structs::{self, TypeRegistry},
MacroInput,
};
use heck::ToSnakeCase;
use proc_macro2::TokenStream;
pub use shaderc::{CompilationArtifact, IncludeType, ResolvedInclude, ShaderKind};
use shaderc::{CompileOptions, Compiler, EnvVersion, SpirvVersion, TargetEnv};
use shaderc::{CompileOptions, Compiler, EnvVersion, TargetEnv};
use std::{
cell::{RefCell, RefMut},
io::Error as IoError,
cell::RefCell,
fs,
iter::Iterator,
path::Path,
path::{Path, PathBuf},
};
use vulkano::shader::{
reflect,
spirv::{Spirv, SpirvError},
use syn::{Error, LitStr};
use vulkano::{
shader::{reflect, spirv::Spirv},
Version,
};
pub(super) fn path_to_str(path: &Path) -> &str {
path.to_str().expect(
"Could not stringify the file to be included. Make sure the path consists of \
valid unicode characters.",
)
pub struct Shader {
pub source: LitStr,
pub name: String,
pub spirv: Spirv,
}
#[allow(clippy::too_many_arguments)]
@ -36,40 +40,39 @@ fn include_callback(
directive_type: IncludeType,
contained_within_path_raw: &str,
recursion_depth: usize,
include_directories: &[impl AsRef<Path>],
include_directories: &[PathBuf],
root_source_has_path: bool,
base_path: &impl AsRef<Path>,
mut includes_tracker: RefMut<'_, Vec<String>>,
base_path: &Path,
includes: &mut Vec<String>,
) -> Result<ResolvedInclude, String> {
let file_to_include = match directive_type {
IncludeType::Relative => {
let requested_source_path = Path::new(requested_source_path_raw);
// Is embedded current shader source embedded within a rust macro?
// If so, abort unless absolute path.
// If the shader source is embedded within the macro, abort unless we get an absolute
// path.
if !root_source_has_path && recursion_depth == 1 && !requested_source_path.is_absolute()
{
let requested_source_name = requested_source_path
.file_name()
.expect("Could not get the name of the requested source file.")
.expect("failed to get the name of the requested source file")
.to_string_lossy();
let requested_source_directory = requested_source_path
.parent()
.expect("Could not get the directory of the requested source file.")
.expect("failed to get the directory of the requested source file")
.to_string_lossy();
return Err(format!(
"Usage of relative paths in imports in embedded GLSL is not \
allowed, try using `#include <{}>` and adding the directory \
`{}` to the `include` array in your `shader!` macro call \
instead.",
requested_source_name, requested_source_directory
"usage of relative paths in imports in embedded GLSL is not allowed, try \
using `#include <{}>` and adding the directory `{}` to the `include` array in \
your `shader!` macro call instead",
requested_source_name, requested_source_directory,
));
}
let mut resolved_path = if recursion_depth == 1 {
Path::new(contained_within_path_raw)
.parent()
.map(|parent| base_path.as_ref().join(parent))
.map(|parent| base_path.join(parent))
} else {
Path::new(contained_within_path_raw)
.parent()
@ -77,17 +80,17 @@ fn include_callback(
}
.unwrap_or_else(|| {
panic!(
"The file `{}` does not reside in a directory. This is \
an implementation error.",
contained_within_path_raw
"the file `{}` does not reside in a directory, this is an implementation \
error",
contained_within_path_raw,
)
});
resolved_path.push(requested_source_path);
if !resolved_path.is_file() {
return Err(format!(
"Invalid inclusion path `{}`, the path does not point to a file.",
requested_source_path_raw
"invalid inclusion path `{}`, the path does not point to a file",
requested_source_path_raw,
));
}
@ -101,79 +104,78 @@ fn include_callback(
// in the relative include directive or when using absolute paths in a standard
// include directive.
return Err(format!(
"No such file found, as specified by the absolute path. \
Keep in mind, that absolute paths cannot be used with \
inclusion from standard directories (`#include <...>`), try \
using `#include \"...\"` instead. Requested path: {}",
requested_source_path_raw
"no such file found as specified by the absolute path; keep in mind that \
absolute paths cannot be used with inclusion from standard directories \
(`#include <...>`), try using `#include \"...\"` instead; requested path: {}",
requested_source_path_raw,
));
}
let found_requested_source_path = include_directories
.iter()
.map(|include_directory| include_directory.as_ref().join(requested_source_path))
.map(|include_directory| include_directory.join(requested_source_path))
.find(|resolved_requested_source_path| resolved_requested_source_path.is_file());
if let Some(found_requested_source_path) = found_requested_source_path {
found_requested_source_path
} else {
return Err(format!(
"Could not include the file `{}` from any include directories.",
requested_source_path_raw
"failed to include the file `{}` from any include directories",
requested_source_path_raw,
));
}
}
};
let file_to_include_string = path_to_str(file_to_include.as_path()).to_string();
let content = read_file_to_string(file_to_include.as_path()).map_err(|_| {
let content = fs::read_to_string(file_to_include.as_path()).map_err(|err| {
format!(
"Could not read the contents of file `{}` to be included in the \
shader source.",
&file_to_include_string
"failed to read the contents of file `{file_to_include:?}` to be included in the \
shader source: {err}",
)
})?;
let resolved_name = file_to_include
.into_os_string()
.into_string()
.map_err(|_| {
"failed to stringify the file to be included; make sure the path consists of valid \
unicode characters"
})?;
includes_tracker.push(file_to_include_string.clone());
includes.push(resolved_name.clone());
Ok(ResolvedInclude {
resolved_name: file_to_include_string,
resolved_name,
content,
})
}
#[allow(clippy::too_many_arguments)]
pub fn compile(
pub(super) fn compile(
input: &MacroInput,
path: Option<String>,
base_path: &impl AsRef<Path>,
base_path: &Path,
code: &str,
ty: ShaderKind,
include_directories: &[impl AsRef<Path>],
macro_defines: &[(impl AsRef<str>, impl AsRef<str>)],
vulkan_version: Option<EnvVersion>,
spirv_version: Option<SpirvVersion>,
shader_kind: ShaderKind,
) -> Result<(CompilationArtifact, Vec<String>), String> {
let includes_tracker = RefCell::new(Vec::new());
let includes = RefCell::new(Vec::new());
let compiler = Compiler::new().ok_or("failed to create GLSL compiler")?;
let mut compile_options = CompileOptions::new().ok_or("failed to initialize compile option")?;
let mut compile_options =
CompileOptions::new().ok_or("failed to initialize compile options")?;
compile_options.set_target_env(
TargetEnv::Vulkan,
vulkan_version.unwrap_or(EnvVersion::Vulkan1_0) as u32,
input.vulkan_version.unwrap_or(EnvVersion::Vulkan1_0) as u32,
);
if let Some(spirv_version) = spirv_version {
if let Some(spirv_version) = input.spirv_version {
compile_options.set_target_spirv(spirv_version);
}
let root_source_path = if let &Some(ref path) = &path {
path
} else {
// An arbitrary placeholder file name for embedded shaders
"shader.glsl"
};
let root_source_path = path.as_deref().unwrap_or(
// An arbitrary placeholder file name for embedded shaders.
"shader.glsl",
);
// Specify file resolution callback for the `#include` directive
// Specify the file resolution callback for the `#include` directive.
compile_options.set_include_callback(
|requested_source_path, directive_type, contained_within_path, recursion_depth| {
include_callback(
@ -181,52 +183,68 @@ pub fn compile(
directive_type,
contained_within_path,
recursion_depth,
include_directories,
&input.include_directories,
path.is_some(),
base_path,
includes_tracker.borrow_mut(),
&mut includes.borrow_mut(),
)
},
);
for (macro_name, macro_value) in macro_defines.iter() {
compile_options.add_macro_definition(macro_name.as_ref(), Some(macro_value.as_ref()));
for (macro_name, macro_value) in &input.macro_defines {
compile_options.add_macro_definition(macro_name, Some(macro_value));
}
#[cfg(feature = "shaderc-debug")]
compile_options.set_generate_debug_info();
let content = compiler
.compile_into_spirv(code, ty, root_source_path, "main", Some(&compile_options))
.map_err(|e| e.to_string())?;
.compile_into_spirv(
code,
shader_kind,
root_source_path,
"main",
Some(&compile_options),
)
.map_err(|e| e.to_string().replace("(s): ", "(s):\n"))?;
let includes = includes_tracker.borrow().clone();
drop(compile_options);
Ok((content, includes))
Ok((content, includes.into_inner()))
}
pub(super) fn reflect<'a, L: LinAlgType>(
prefix: &'a str,
pub(super) fn reflect(
input: &MacroInput,
source: LitStr,
name: String,
words: &[u32],
types_meta: &TypesMeta,
input_paths: impl IntoIterator<Item = &'a str>,
shared_constants: bool,
types_registry: &'a mut HashMap<String, RegisteredType>,
input_paths: Vec<String>,
type_registry: &mut TypeRegistry,
) -> Result<(TokenStream, TokenStream), Error> {
let spirv = Spirv::new(words)?;
let spirv = Spirv::new(words).map_err(|err| {
Error::new_spanned(&source, format!("failed to parse SPIR-V words: {err}"))
})?;
let shader = Shader {
source,
name,
spirv,
};
let include_bytes = input_paths.into_iter().map(|s| {
quote! {
// using include_bytes here ensures that changing the shader will force recompilation.
// Using `include_bytes` here ensures that changing the shader will force recompilation.
// The bytes themselves can be optimized out by the compiler as they are unused.
::std::include_bytes!( #s )
}
});
let spirv_version = {
let major = spirv.version().major;
let minor = spirv.version().minor;
let patch = spirv.version().patch;
let Version {
major,
minor,
patch,
} = shader.spirv.version();
quote! {
::vulkano::Version {
major: #major,
@ -235,34 +253,33 @@ pub(super) fn reflect<'a, L: LinAlgType>(
}
}
};
let spirv_capabilities = reflect::spirv_capabilities(&spirv).map(|capability| {
let spirv_capabilities = reflect::spirv_capabilities(&shader.spirv).map(|capability| {
let name = format_ident!("{}", format!("{:?}", capability));
quote! { &::vulkano::shader::spirv::Capability::#name }
});
let spirv_extensions = reflect::spirv_extensions(&spirv);
let entry_points = reflect::entry_points(&spirv)
let spirv_extensions = reflect::spirv_extensions(&shader.spirv);
let entry_points = reflect::entry_points(&shader.spirv)
.map(|(name, model, info)| entry_point::write_entry_point(&name, model, &info));
let specialization_constants = structs::write_specialization_constants::<L>(
prefix,
&spirv,
shared_constants,
types_registry,
);
let specialization_constants =
structs::write_specialization_constants(input, &shader, type_registry)?;
let load_name = if prefix.is_empty() {
let load_name = if shader.name.is_empty() {
format_ident!("load")
} else {
format_ident!("load_{}", prefix)
format_ident!("load_{}", shader.name.to_snake_case())
};
let shader_code = quote! {
/// Loads the shader in Vulkan as a `ShaderModule`.
#[inline]
/// Loads the shader as a `ShaderModule`.
#[allow(unsafe_code)]
pub fn #load_name(device: ::std::sync::Arc<::vulkano::device::Device>)
-> Result<::std::sync::Arc<::vulkano::shader::ShaderModule>, ::vulkano::shader::ShaderCreationError>
{
#[inline]
pub fn #load_name(
device: ::std::sync::Arc<::vulkano::device::Device>,
) -> ::std::result::Result<
::std::sync::Arc<::vulkano::shader::ShaderModule>,
::vulkano::shader::ShaderCreationError,
> {
let _bytes = ( #( #include_bytes ),* );
static WORDS: &[u32] = &[ #( #words ),* ];
@ -282,51 +299,19 @@ pub(super) fn reflect<'a, L: LinAlgType>(
#specialization_constants
};
let structs = structs::write_structs::<L>(prefix, &spirv, types_meta, types_registry);
let structs = structs::write_structs(input, &shader, type_registry)?;
Ok((shader_code, structs))
}
#[derive(Debug)]
pub enum Error {
IoError(IoError),
SpirvError(SpirvError),
}
impl From<IoError> for Error {
fn from(err: IoError) -> Error {
Error::IoError(err)
}
}
impl From<SpirvError> for Error {
fn from(err: SpirvError) -> Error {
Error::SpirvError(err)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{codegen::compile, StdArray};
use shaderc::ShaderKind;
use std::path::{Path, PathBuf};
use vulkano::shader::{reflect, spirv::Spirv};
#[cfg(not(target_os = "windows"))]
pub fn path_separator() -> &'static str {
"/"
}
#[cfg(target_os = "windows")]
pub fn path_separator() -> &'static str {
"\\"
}
fn convert_paths(root_path: &Path, paths: &[String]) -> Vec<String> {
fn convert_paths(root_path: &Path, paths: &[PathBuf]) -> Vec<String> {
paths
.iter()
.map(|p| path_to_str(root_path.join(p).as_path()).to_owned())
.map(|p| root_path.join(p).into_os_string().into_string().unwrap())
.collect()
}
@ -344,194 +329,88 @@ mod tests {
}
#[test]
fn test_bad_alignment() {
// vec3/mat3/mat3x* are problematic in arrays since their rust
// representations don't have the same array stride as the SPIR-V
// ones. E.g. in a vec3[2], the second element starts on the 16th
// byte, but in a rust [[f32;3];2], the second element starts on the
// 12th byte. Since we can't generate code for these types, we should
// create an error instead of generating incorrect code.
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (comp, _) = compile(
None,
&Path::new(""),
"
#version 450
struct MyStruct {
vec3 vs[2];
};
layout(binding=0) uniform UBO {
MyStruct s;
};
void main() {}
",
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
let res = std::panic::catch_unwind(|| {
structs::write_structs::<StdArray>(
"",
&spirv,
&TypesMeta::default(),
&mut HashMap::default(),
)
});
assert!(res.is_err());
}
#[test]
fn test_trivial_alignment() {
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (comp, _) = compile(
None,
&Path::new(""),
"
#version 450
struct MyStruct {
vec4 vs[2];
};
layout(binding=0) uniform UBO {
MyStruct s;
};
void main() {}
",
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
structs::write_structs::<StdArray>(
"",
&spirv,
&TypesMeta::default(),
&mut HashMap::default(),
);
}
#[test]
fn test_wrap_alignment() {
// This is a workaround suggested in the case of test_bad_alignment,
// so we should make sure it works.
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (comp, _) = compile(
None,
&Path::new(""),
"
#version 450
struct Vec3Wrap {
vec3 v;
};
struct MyStruct {
Vec3Wrap vs[2];
};
layout(binding=0) uniform UBO {
MyStruct s;
};
void main() {}
",
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
structs::write_structs::<StdArray>(
"",
&spirv,
&TypesMeta::default(),
&mut HashMap::default(),
);
}
#[test]
fn test_include_resolution() {
fn include_resolution() {
let root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let empty_includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
let (_compile_relative, _) = compile(
&MacroInput::empty(),
Some(String::from("tests/include_test.glsl")),
&root_path,
"
r#"
#version 450
#include \"include_dir_a/target_a.glsl\"
#include \"include_dir_b/target_b.glsl\"
#include "include_dir_a/target_a.glsl"
#include "include_dir_b/target_b.glsl"
void main() {}
",
"#,
ShaderKind::Vertex,
&empty_includes,
&defines,
None,
None,
)
.expect("Cannot resolve include files");
.expect("cannot resolve include files");
let (_compile_include_paths, includes) = compile(
&MacroInput {
include_directories: vec![
root_path.join("tests").join("include_dir_a"),
root_path.join("tests").join("include_dir_b"),
],
..MacroInput::empty()
},
Some(String::from("tests/include_test.glsl")),
&root_path,
"
r#"
#version 450
#include <target_a.glsl>
#include <target_b.glsl>
void main() {}
",
"#,
ShaderKind::Vertex,
&[
root_path.join("tests").join("include_dir_a"),
root_path.join("tests").join("include_dir_b"),
],
&defines,
None,
None,
)
.expect("Cannot resolve include files");
.expect("cannot resolve include files");
assert_eq!(
includes,
convert_paths(
&root_path,
&[
vec!["tests", "include_dir_a", "target_a.glsl"].join(path_separator()),
vec!["tests", "include_dir_b", "target_b.glsl"].join(path_separator()),
]
)
["tests", "include_dir_a", "target_a.glsl"]
.into_iter()
.collect(),
["tests", "include_dir_b", "target_b.glsl"]
.into_iter()
.collect(),
],
),
);
let (_compile_include_paths_with_relative, includes_with_relative) = compile(
&MacroInput {
include_directories: vec![root_path.join("tests").join("include_dir_a")],
..MacroInput::empty()
},
Some(String::from("tests/include_test.glsl")),
&root_path,
"
r#"
#version 450
#include <target_a.glsl>
#include <../include_dir_b/target_b.glsl>
void main() {}
",
"#,
ShaderKind::Vertex,
&[root_path.join("tests").join("include_dir_a")],
&defines,
None,
None,
)
.expect("Cannot resolve include files");
.expect("cannot resolve include files");
assert_eq!(
includes_with_relative,
convert_paths(
&root_path,
&[
vec!["tests", "include_dir_a", "target_a.glsl"].join(path_separator()),
vec!["tests", "include_dir_a", "../include_dir_b/target_b.glsl"]
.join(path_separator()),
]
)
["tests", "include_dir_a", "target_a.glsl"]
.into_iter()
.collect(),
["tests", "include_dir_a", "../include_dir_b/target_b.glsl"]
.into_iter()
.collect(),
],
),
);
let absolute_path = root_path
@ -540,99 +419,99 @@ mod tests {
.join("target_a.glsl");
let absolute_path_str = absolute_path
.to_str()
.expect("Cannot run tests in a folder with non unicode characters");
.expect("cannot run tests in a folder with non unicode characters");
let (_compile_absolute_path, includes_absolute_path) = compile(
&MacroInput::empty(),
Some(String::from("tests/include_test.glsl")),
&root_path,
&format!(
"
r#"
#version 450
#include \"{}\"
#include "{absolute_path_str}"
void main() {{}}
",
absolute_path_str
"#,
),
ShaderKind::Vertex,
&empty_includes,
&defines,
None,
None,
)
.expect("Cannot resolve include files");
.expect("cannot resolve include files");
assert_eq!(
includes_absolute_path,
convert_paths(
&root_path,
&[vec!["tests", "include_dir_a", "target_a.glsl"].join(path_separator())]
)
&[["tests", "include_dir_a", "target_a.glsl"]
.into_iter()
.collect()],
),
);
let (_compile_recursive_, includes_recursive) = compile(
Some(String::from("tests/include_test.glsl")),
&root_path,
"
#version 450
#include <target_c.glsl>
void main() {}
",
ShaderKind::Vertex,
&[
&MacroInput {
include_directories: vec![
root_path.join("tests").join("include_dir_b"),
root_path.join("tests").join("include_dir_c"),
],
&defines,
None,
None,
..MacroInput::empty()
},
Some(String::from("tests/include_test.glsl")),
&root_path,
r#"
#version 450
#include <target_c.glsl>
void main() {}
"#,
ShaderKind::Vertex,
)
.expect("Cannot resolve include files");
.expect("cannot resolve include files");
assert_eq!(
includes_recursive,
convert_paths(
&root_path,
&[
vec!["tests", "include_dir_c", "target_c.glsl"].join(path_separator()),
vec!["tests", "include_dir_c", "../include_dir_a/target_a.glsl"]
.join(path_separator()),
vec!["tests", "include_dir_b", "target_b.glsl"].join(path_separator()),
]
)
["tests", "include_dir_c", "target_c.glsl"]
.into_iter()
.collect(),
["tests", "include_dir_c", "../include_dir_a/target_a.glsl"]
.into_iter()
.collect(),
["tests", "include_dir_b", "target_b.glsl"]
.into_iter()
.collect(),
],
),
);
}
#[test]
fn test_macros() {
let empty_includes: [PathBuf; 0] = [];
let defines = vec![("NAME1", ""), ("NAME2", "58")];
let no_defines: [(String, String); 0] = [];
let need_defines = "
fn macros() {
let need_defines = r#"
#version 450
#if defined(NAME1) && NAME2 > 29
void main() {}
#endif
";
"#;
let compile_no_defines = compile(
&MacroInput::empty(),
None,
&Path::new(""),
Path::new(""),
need_defines,
ShaderKind::Vertex,
&empty_includes,
&no_defines,
None,
None,
);
assert!(compile_no_defines.is_err());
let compile_defines = compile(
compile(
&MacroInput {
macro_defines: vec![("NAME1".into(), "".into()), ("NAME2".into(), "58".into())],
..MacroInput::empty()
},
None,
&Path::new(""),
Path::new(""),
need_defines,
ShaderKind::Vertex,
&empty_includes,
&defines,
None,
None,
);
compile_defines.expect("Setting shader macros did not work");
)
.expect("setting shader macros did not work");
}
/// `entrypoint1.frag.glsl`:
@ -699,7 +578,7 @@ mod tests {
/// spirv-link entrypoint1.spv entrypoint2.spv -o multiple_entrypoints.spv
/// ```
#[test]
fn test_descriptor_calculation_with_multiple_entrypoints() {
fn descriptor_calculation_with_multiple_entrypoints() {
let data = include_bytes!("../tests/multiple_entrypoints.spv");
let instructions: Vec<u32> = data
.chunks(4)
@ -715,11 +594,12 @@ mod tests {
}
// Check first entrypoint
let e1_descriptors = descriptors.get(0).expect("Could not find entrypoint1");
let e1_descriptors = descriptors.get(0).expect("could not find entrypoint1");
let mut e1_bindings = Vec::new();
for loc in e1_descriptors.keys() {
e1_bindings.push(*loc);
}
assert_eq!(e1_bindings.len(), 5);
assert!(e1_bindings.contains(&(0, 0)));
assert!(e1_bindings.contains(&(0, 1)));
@ -728,11 +608,12 @@ mod tests {
assert!(e1_bindings.contains(&(0, 4)));
// Check second entrypoint
let e2_descriptors = descriptors.get(1).expect("Could not find entrypoint2");
let e2_descriptors = descriptors.get(1).expect("could not find entrypoint2");
let mut e2_bindings = Vec::new();
for loc in e2_descriptors.keys() {
e2_bindings.push(*loc);
}
assert_eq!(e2_bindings.len(), 3);
assert!(e2_bindings.contains(&(0, 0)));
assert!(e2_bindings.contains(&(0, 1)));
@ -740,13 +621,12 @@ mod tests {
}
#[test]
fn test_descriptor_calculation_with_multiple_functions() {
let includes: [PathBuf; 0] = [];
let defines: [(String, String); 0] = [];
fn descriptor_calculation_with_multiple_functions() {
let (comp, _) = compile(
&MacroInput::empty(),
None,
&Path::new(""),
"
Path::new(""),
r#"
#version 450
layout(set = 1, binding = 0) buffer Buffer {
@ -771,12 +651,8 @@ mod tests {
void main() {
bo.data = makeSecretSauce();
}
",
"#,
ShaderKind::Vertex,
&includes,
&defines,
None,
None,
)
.unwrap();
let spirv = Spirv::new(comp.as_binary()).unwrap();
@ -786,6 +662,7 @@ mod tests {
for (loc, _reqs) in info.descriptor_binding_requirements {
bindings.push(loc);
}
assert_eq!(bindings.len(), 4);
assert!(bindings.contains(&(1, 0)));
assert!(bindings.contains(&(2, 0)));
@ -794,6 +671,6 @@ mod tests {
return;
}
panic!("Could not find entrypoint");
panic!("could not find entrypoint");
}
}

View File

@ -8,7 +8,7 @@
// according to those terms.
use ahash::HashMap;
use proc_macro2::TokenStream;
use proc_macro2::{Ident, Span, TokenStream};
use vulkano::{
pipeline::layout::PushConstantRange,
shader::{
@ -25,11 +25,7 @@ pub(super) fn write_entry_point(
info: &EntryPointInfo,
) -> TokenStream {
let execution = write_shader_execution(&info.execution);
let model = syn::parse_str::<syn::Path>(&format!(
"::vulkano::shader::spirv::ExecutionModel::{:?}",
model
))
.unwrap();
let model = Ident::new(&format!("{:?}", model), Span::call_site());
let descriptor_binding_requirements =
write_descriptor_binding_requirements(&info.descriptor_binding_requirements);
let push_constant_requirements =
@ -42,7 +38,7 @@ pub(super) fn write_entry_point(
quote! {
(
#name.to_owned(),
#model,
::vulkano::shader::spirv::ExecutionModel::#model,
::vulkano::shader::EntryPointInfo {
execution: #execution,
descriptor_binding_requirements: #descriptor_binding_requirements.into_iter().collect(),
@ -194,7 +190,9 @@ fn write_descriptor_binding_requirements(
sampler_compare: #sampler_compare,
sampler_no_unnormalized_coordinates: #sampler_no_unnormalized_coordinates,
sampler_no_ycbcr_conversion: #sampler_no_ycbcr_conversion,
sampler_with_images: [#(#sampler_with_images_items),*].into_iter().collect(),
sampler_with_images: [ #( #sampler_with_images_items ),* ]
.into_iter()
.collect(),
storage_image_atomic: #storage_image_atomic,
}
)
@ -214,13 +212,13 @@ fn write_descriptor_binding_requirements(
stages: #stages,
descriptors: [ #( #descriptor_items ),* ].into_iter().collect(),
},
),
)
}
});
quote! {
[
#( #descriptor_binding_requirements )*
#( #descriptor_binding_requirements ),*
]
}
}
@ -264,13 +262,13 @@ fn write_specialization_constant_requirements(
::vulkano::shader::SpecializationConstantRequirements {
size: #size,
},
),
)
}
});
quote! {
[
#( #specialization_constant_requirements )*
#( #specialization_constant_requirements ),*
]
}
}
@ -301,15 +299,15 @@ fn write_interface(interface: &ShaderInterface) -> TokenStream {
num_elements: #num_elements,
is_64bit: #is_64bit,
},
name: Some(::std::borrow::Cow::Borrowed(#name))
},
name: ::std::option::Option::Some(::std::borrow::Cow::Borrowed(#name)),
}
}
},
);
quote! {
::vulkano::shader::ShaderInterface::new_unchecked(vec![
#( #items )*
#( #items ),*
])
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -21,18 +21,15 @@ ahash = "0.8"
# When updating Ash, also update vk.xml to the same Vulkan patch version that Ash uses.
# All versions of vk.xml can be found at https://github.com/KhronosGroup/Vulkan-Headers/commits/main/registry/vk.xml.
ash = "^0.37.1"
bytemuck = { version = "1.7", features = [
"derive",
"extern_crate_std",
"min_const_generics",
] }
bytemuck = "1.7"
cgmath = { version = "0.18.0", optional = true }
crossbeam-queue = "0.3"
half = "2"
half = { version = "2", features = ["bytemuck"] }
libloading = "0.7"
nalgebra = { version = "0.31.0", optional = true }
once_cell = "1.16"
parking_lot = { version = "0.12", features = ["send_guard"] }
serde = { version = "1.0", optional = true }
smallvec = "1.8"
thread_local = "1.1"
vulkano-macros = { path = "macros", version = "0.32.0" }

View File

@ -21,3 +21,6 @@ syn = "1.0"
quote = "1.0"
proc-macro2 = "1.0"
proc-macro-crate = "1.2"
[dev-dependencies]
vulkano = { path = ".." }

View File

@ -0,0 +1,329 @@
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::bail;
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::{
parse_quote, spanned::Spanned, Data, DeriveInput, Fields, FieldsNamed, FieldsUnnamed, Ident,
Meta, MetaList, NestedMeta, Result, Type, TypeArray, TypeSlice, WherePredicate,
};
pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result<TokenStream> {
let crate_ident = crate::crate_ident();
let struct_ident = &ast.ident;
if !ast
.attrs
.iter()
.filter_map(|attr| {
attr.path
.is_ident("repr")
.then(|| attr.parse_meta().unwrap())
})
.any(|meta| match meta {
Meta::List(MetaList { nested, .. }) => {
nested.iter().any(|nested_meta| match nested_meta {
NestedMeta::Meta(Meta::Path(path)) => {
path.is_ident("C") || path.is_ident("transparent")
}
_ => false,
})
}
_ => false,
})
{
bail!(
"deriving `BufferContents` is only supported for types that are marked `#[repr(C)]` \
or `#[repr(transparent)]`",
);
}
let (impl_generics, type_generics, where_clause) = {
let predicates = ast
.generics
.type_params()
.map(|ty| {
parse_quote! { #ty: ::#crate_ident::buffer::BufferContents }
})
.collect::<Vec<WherePredicate>>();
ast.generics
.make_where_clause()
.predicates
.extend(predicates);
ast.generics.split_for_impl()
};
let layout = write_layout(&crate_ident, &ast)?;
Ok(quote! {
#[allow(unsafe_code)]
unsafe impl #impl_generics ::#crate_ident::buffer::BufferContents
for #struct_ident #type_generics #where_clause
{
const LAYOUT: ::#crate_ident::buffer::BufferContentsLayout = #layout;
#[inline(always)]
unsafe fn from_ffi(data: *mut ::std::ffi::c_void, range: usize) -> *mut Self {
#[repr(C)]
union PtrRepr<T: ?Sized> {
components: PtrComponents,
ptr: *mut T,
}
#[derive(Clone, Copy)]
#[repr(C)]
struct PtrComponents {
data: *mut ::std::ffi::c_void,
len: usize,
}
let alignment = <Self as ::#crate_ident::buffer::BufferContents>::LAYOUT
.alignment()
.as_devicesize() as usize;
::std::debug_assert!(data as usize % alignment == 0);
let head_size = <Self as ::#crate_ident::buffer::BufferContents>::LAYOUT
.head_size() as usize;
let element_size = <Self as ::#crate_ident::buffer::BufferContents>::LAYOUT
.element_size()
.unwrap_or(1) as usize;
::std::debug_assert!(range >= head_size);
let tail_size = range - head_size;
::std::debug_assert!(tail_size % element_size == 0);
let len = tail_size / element_size;
let components = PtrComponents { data, len };
// SAFETY: All fields must implement `BufferContents`. The last field, if it is
// unsized, must therefore be a slice or a DST derived from a slice. It cannot be
// any other kind of DST, unless unsafe code was used to achieve that.
//
// That means we can safely rely on knowing what kind of DST the implementing type
// is, but it doesn't tell us what the correct representation for the pointer of
// this kind of DST is. For that we have to rely on what the docs tell us, namely
// that for structs where the last field is a DST, the metadata is the same as the
// last field's. We also know that the metadata of a slice is its length measured
// in the number of elements. This tells us that the components of a pointer to the
// implementing type are the address to the start of the data, and a length. It
// still does not tell us what the representation of the pointer is though.
//
// In fact, there is no way to be certain that this representation is correct.
// *Theoretically* rustc could decide tomorrow that the metadata comes first and
// the address comes last, but the chance of that ever happening is zero.
//
// But what if the implementing type is actually sized? In that case this
// conversion will simply discard the length field, and only leave the pointer.
PtrRepr { components }.ptr
}
}
})
}
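For the plain-slice case, the pointer assembly performed by the generated `PtrRepr` union can be written on stable Rust with `std::ptr::slice_from_raw_parts_mut`; the union is only needed because no such constructor exists for arbitrary user-defined DSTs. A hedged sketch for illustration (the helper name is made up):

use std::ffi::c_void;

// Builds a fat `*mut [u32]` from a base address and an element count, the same
// (address, length) pair that `from_ffi` computes above.
fn slice_ptr_from_ffi(data: *mut c_void, len: usize) -> *mut [u32] {
    std::ptr::slice_from_raw_parts_mut(data.cast::<u32>(), len)
}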
fn write_layout(crate_ident: &Ident, ast: &DeriveInput) -> Result<TokenStream> {
let data = match &ast.data {
Data::Struct(data) => data,
Data::Enum(_) => bail!("deriving `BufferContents` for enums is not supported"),
Data::Union(_) => bail!("deriving `BufferContents` for unions is not supported"),
};
let fields = match &data.fields {
Fields::Named(FieldsNamed { named, .. }) => named,
Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => unnamed,
Fields::Unit => bail!("zero-sized types are not valid buffer contents"),
};
let mut field_types = fields.iter().map(|field| &field.ty);
let last_field_type = field_types.next_back().unwrap();
let mut layout = quote! { ::std::alloc::Layout::new::<()>() };
let mut bound_types = Vec::new();
// Construct the layout of the head and accumulate the types that have to implement
// `BufferContents` in order for the struct to implement the trait as well.
for field_type in field_types {
bound_types.push(find_innermost_element_type(field_type));
layout = quote! {
extend_layout(#layout, ::std::alloc::Layout::new::<#field_type>())
};
}
// The last field needs special treatment.
match last_field_type {
// An array might not implement `BufferContents` depending on the element, and therefore we
// can't use `BufferContents::extend_from_layout` on it.
Type::Array(TypeArray { elem, .. }) => {
bound_types.push(find_innermost_element_type(elem));
layout = quote! {
::#crate_ident::buffer::BufferContentsLayout::from_sized(
::std::alloc::Layout::new::<Self>()
)
};
}
// A slice might contain an array same as above, and therefore we can't use
// `BufferContents::extend_from_layout` on it either.
Type::Slice(TypeSlice { elem, .. }) => {
bound_types.push(find_innermost_element_type(elem));
layout = quote! {
::#crate_ident::buffer::BufferContentsLayout::from_head_element_layout(
#layout,
::std::alloc::Layout::new::<#elem>(),
)
};
}
ty => {
bound_types.push(ty);
layout = quote! {
<#last_field_type as ::#crate_ident::buffer::BufferContents>::LAYOUT
.extend_from_layout(&#layout)
};
}
}
let (impl_generics, _, where_clause) = ast.generics.split_for_impl();
let bounds = bound_types.into_iter().map(|ty| {
quote_spanned! { ty.span() =>
{
// HACK: This works around Rust issue #48214, which makes it impossible to put
// these bounds in the where clause of the trait implementation where they actually
// belong until that is resolved.
#[allow(unused)]
fn bound #impl_generics () #where_clause {
fn assert_impl<T: ::#crate_ident::buffer::BufferContents + ?Sized>() {}
assert_impl::<#ty>();
}
}
}
});
let layout = quote! {
{
#( #bounds )*
// HACK: Very depressingly, `Layout::extend` is not const.
const fn extend_layout(
layout: ::std::alloc::Layout,
next: ::std::alloc::Layout,
) -> ::std::alloc::Layout {
let padded_size = if let Some(val) =
layout.size().checked_add(next.align() - 1)
{
val & !(next.align() - 1)
} else {
::std::unreachable!()
};
// TODO: Replace with `Ord::max` once its constness is stabilized.
let align = if layout.align() >= next.align() {
layout.align()
} else {
next.align()
};
if let Some(size) = padded_size.checked_add(next.size()) {
if let Ok(layout) = ::std::alloc::Layout::from_size_align(size, align) {
layout
} else {
::std::unreachable!()
}
} else {
::std::unreachable!()
}
}
if let Some(layout) = #layout {
if let Some(layout) = layout.pad_to_alignment() {
layout
} else {
::std::unreachable!()
}
} else {
::std::panic!("zero-sized types are not valid buffer contents")
}
}
};
Ok(layout)
}
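Because the const workaround above reimplements `Layout::extend` by hand, a small sanity check against the standard library can be useful. A non-const sketch of the same arithmetic, for illustration only (overflow checks omitted):

use std::alloc::Layout;

fn extend_layout(layout: Layout, next: Layout) -> Layout {
    // Round the current size up to the alignment of `next`, then append it.
    let padded_size = (layout.size() + next.align() - 1) & !(next.align() - 1);
    let align = layout.align().max(next.align());
    Layout::from_size_align(padded_size + next.size(), align).unwrap()
}

fn main() {
    let head = Layout::new::<u8>();
    let next = Layout::new::<u32>();
    // `Layout::extend` returns the combined layout and the field offset; the
    // layout part matches the hand-rolled computation.
    assert_eq!(extend_layout(head, next), head.extend(next).unwrap().0);
}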
// HACK: This works around an inherent limitation of bytemuck, namely that an array where the
// element is `AnyBitPattern` is itself not `AnyBitPattern`, by only requiring that the innermost
// type in the array implements `BufferContents`.
fn find_innermost_element_type(mut field_type: &Type) -> &Type {
while let Type::Array(TypeArray { elem, .. }) = field_type {
field_type = elem;
}
field_type
}
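A hedged, test-style sketch of what the helper returns; the imports mirror the ones already used in this module:

#[test]
fn innermost_element_of_nested_array() {
    use quote::quote;
    use syn::{parse_quote, Type};

    // `[[f32; 4]; 2]` bounds only `f32`, mirroring the comment above.
    let ty: Type = parse_quote!([[f32; 4]; 2]);
    let inner = find_innermost_element_type(&ty);
    assert_eq!(quote!(#inner).to_string(), "f32");
}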
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn repr() {
let default_repr = parse_quote! {
struct Test(u8, [u8]);
};
assert!(derive_buffer_contents(default_repr).is_err());
let irrelevant_reprs = parse_quote! {
#[repr(packed(2), align(16))]
struct Test(u8, [u8]);
};
assert!(derive_buffer_contents(irrelevant_reprs).is_err());
let transparent_repr = parse_quote! {
#[repr(transparent)]
struct Test([u8]);
};
assert!(derive_buffer_contents(transparent_repr).is_ok());
let multiple_reprs = parse_quote! {
#[repr(align(16))]
#[repr(C)]
#[repr(packed)]
struct Test(u8, [u8]);
};
assert!(derive_buffer_contents(multiple_reprs).is_ok());
}
#[test]
fn zero_sized() {
let unit = parse_quote! {
struct Test;
};
assert!(derive_buffer_contents(unit).is_err());
}
#[test]
fn unsupported_datatype() {
let enum_ = parse_quote! {
#[repr(C)]
enum Test { A, B, C }
};
assert!(derive_buffer_contents(enum_).is_err());
let union = parse_quote! {
#[repr(C)]
union Test {
a: u32,
b: f32,
}
};
assert!(derive_buffer_contents(union).is_err());
}
}
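To connect the branches in `write_layout` with user code, here is a hedged illustration of the struct shapes the derive is meant to accept; the type and field names are hypothetical:

use vulkano::buffer::BufferContents;

// Sized contents: every field, including the trailing array, contributes to
// the head layout.
#[derive(BufferContents)]
#[repr(C)]
struct SizedData {
    count: u32,
    values: [f32; 4],
}

// Unsized contents: the trailing slice becomes the element part of the layout,
// so the type can describe a runtime-sized buffer.
#[derive(BufferContents)]
#[repr(C)]
struct UnsizedData {
    count: u32,
    values: [f32],
}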

View File

@ -7,14 +7,13 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use proc_macro::TokenStream;
use proc_macro2::Span;
use proc_macro_crate::{crate_name, FoundCrate};
use crate::bail;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
punctuated::Punctuated,
Data, DataStruct, Error, Fields, Ident, LitStr, Result, Token,
Data, DataStruct, Fields, Ident, LitStr, Result, Token,
};
pub fn derive_vertex(ast: syn::DeriveInput) -> Result<TokenStream> {
@ -25,25 +24,14 @@ pub fn derive_vertex(ast: syn::DeriveInput) -> Result<TokenStream> {
fields: Fields::Named(fields),
..
}) => &fields.named,
_ => {
return Err(Error::new_spanned(
ast,
"Expected a struct with named fields",
));
}
_ => bail!("expected a struct with named fields"),
};
let found_crate = crate_name("vulkano").expect("vulkano is present in `Cargo.toml`");
let crate_ident = match found_crate {
// We use `vulkano` by default as we are exporting crate as vulkano in vulkano/lib.rs.
FoundCrate::Itself => Ident::new("vulkano", Span::call_site()),
FoundCrate::Name(name) => Ident::new(&name, Span::call_site()),
};
let crate_ident = crate::crate_ident();
let mut members = quote! {
let mut offset = 0;
let mut members = HashMap::default();
let mut members = ::std::collections::HashMap::default();
};
for field in fields.iter() {
@ -64,30 +52,30 @@ pub fn derive_vertex(ast: syn::DeriveInput) -> Result<TokenStream> {
} else if attr_ident == "format" {
let format_ident = attr.parse_args_with(Ident::parse)?;
format = quote! {
let format = Format::#format_ident;
let format = ::#crate_ident::format::Format::#format_ident;
};
}
}
if format.is_empty() {
return Err(Error::new(
field_name.span(),
"Expected `#[format(...)]`-attribute with valid `vulkano::format::Format`",
));
bail!(
field_name,
"expected `#[format(...)]`-attribute with valid `vulkano::format::Format`",
);
}
for name in &names {
members = quote! {
#members
let field_size = std::mem::size_of::<#field_ty>() as u32;
let field_size = ::std::mem::size_of::<#field_ty>() as u32;
{
#format
let format_size = format.block_size().expect("no block size for format") as u32;
let num_elements = field_size / format_size;
let remainder = field_size % format_size;
assert!(remainder == 0, "struct field `{}` size does not fit multiple of format size", #field_name_lit);
::std::assert!(remainder == 0, "struct field `{}` size does not fit multiple of format size", #field_name_lit);
members.insert(
#name.to_string(),
VertexMemberInfo {
::#crate_ident::pipeline::graphics::vertex_input::VertexMemberInfo {
offset,
format,
num_elements,
@ -100,37 +88,34 @@ pub fn derive_vertex(ast: syn::DeriveInput) -> Result<TokenStream> {
}
let function_body = quote! {
#[allow(unused_imports)]
use std::collections::HashMap;
use #crate_ident::format::Format;
use #crate_ident::pipeline::graphics::vertex_input::{VertexInputRate, VertexMemberInfo};
#members
#crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
::#crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
members,
stride: std::mem::size_of::<#struct_name>() as u32,
input_rate: VertexInputRate::Vertex,
stride: ::std::mem::size_of::<#struct_name>() as u32,
input_rate: ::#crate_ident::pipeline::graphics::vertex_input::VertexInputRate::Vertex,
}
};
Ok(TokenStream::from(quote! {
Ok(quote! {
#[allow(unsafe_code)]
unsafe impl #crate_ident::pipeline::graphics::vertex_input::Vertex for #struct_name {
unsafe impl ::#crate_ident::pipeline::graphics::vertex_input::Vertex for #struct_name {
#[inline(always)]
fn per_vertex() -> #crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
fn per_vertex() -> ::#crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
#function_body
}
#[inline(always)]
fn per_instance() -> #crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
#function_body.per_instance()
fn per_instance() -> ::#crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
Self::per_vertex().per_instance()
}
#[inline(always)]
fn per_instance_with_divisor(divisor: u32) -> #crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
#function_body.per_instance_with_divisor(divisor)
fn per_instance_with_divisor(divisor: u32) -> ::#crate_ident::pipeline::graphics::vertex_input::VertexBufferDescription {
Self::per_vertex().per_instance_with_divisor(divisor)
}
}
}))
})
}
struct NameMeta {
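
For illustration, a hedged sketch of the reworked derive in use; `InstanceData` and its field are made up, and `per_instance` now routes through `per_vertex` as generated above:

use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};

#[derive(BufferContents, Vertex)]
#[repr(C)]
struct InstanceData {
    #[format(R32G32B32_SFLOAT)]
    offset: [f32; 3],
}

fn main() {
    let description = InstanceData::per_instance();
    // Three `f32`s per instance, so the stride is 12 bytes.
    assert_eq!(description.stride, 12);
}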

View File

@ -8,12 +8,47 @@
// according to those terms.
use proc_macro::TokenStream;
use syn::{parse_macro_input, DeriveInput};
use proc_macro_crate::{crate_name, FoundCrate};
use syn::{parse_macro_input, DeriveInput, Error};
mod derive_buffer_contents;
mod derive_vertex;
#[proc_macro_derive(Vertex, attributes(name, format))]
pub fn proc_derive_vertex(input: TokenStream) -> TokenStream {
pub fn derive_vertex(input: TokenStream) -> TokenStream {
let ast = parse_macro_input!(input as DeriveInput);
derive_vertex::derive_vertex(ast).unwrap_or_else(|err| err.to_compile_error().into())
derive_vertex::derive_vertex(ast)
.unwrap_or_else(Error::into_compile_error)
.into()
}
#[proc_macro_derive(BufferContents)]
pub fn derive_buffer_contents(input: TokenStream) -> TokenStream {
let ast = parse_macro_input!(input as DeriveInput);
derive_buffer_contents::derive_buffer_contents(ast)
.unwrap_or_else(Error::into_compile_error)
.into()
}
fn crate_ident() -> syn::Ident {
let found_crate = crate_name("vulkano").unwrap();
let name = match &found_crate {
// We use `vulkano` by default because the crate re-exports itself as `vulkano` in vulkano/lib.rs.
FoundCrate::Itself => "vulkano",
FoundCrate::Name(name) => name,
};
syn::Ident::new(name, proc_macro2::Span::call_site())
}
macro_rules! bail {
($msg:expr $(,)?) => {
return Err(syn::Error::new(proc_macro2::Span::call_site(), $msg))
};
($span:expr, $msg:expr $(,)?) => {
return Err(syn::Error::new_spanned($span, $msg))
};
}
use bail;

View File

@ -9,7 +9,7 @@
//! Efficiently suballocates buffers into smaller subbuffers.
use super::{Buffer, BufferError, BufferMemory, BufferUsage, Subbuffer};
use super::{Buffer, BufferContents, BufferError, BufferMemory, BufferUsage, Subbuffer};
use crate::{
buffer::BufferAllocateInfo,
device::{Device, DeviceOwned},
@ -17,11 +17,10 @@ use crate::{
align_up, AllocationCreationError, DeviceAlignment, DeviceLayout, MemoryAllocator,
MemoryUsage, StandardMemoryAllocator,
},
DeviceSize,
DeviceSize, NonZeroDeviceSize,
};
use crossbeam_queue::ArrayQueue;
use std::{
alloc::Layout,
cell::UnsafeCell,
cmp,
hash::{Hash, Hasher},
@ -196,36 +195,50 @@ where
}
/// Allocates a subbuffer for sized data.
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
pub fn allocate_sized<T>(&self) -> Result<Subbuffer<T>, AllocationCreationError> {
let layout = DeviceLayout::from_layout(Layout::new::<T>())
.expect("can't allocate memory for zero-sized types");
pub fn allocate_sized<T>(&self) -> Result<Subbuffer<T>, AllocationCreationError>
where
T: BufferContents,
{
let layout = T::LAYOUT.unwrap_sized();
self.allocate(layout)
.map(|subbuffer| unsafe { subbuffer.reinterpret() })
unsafe { &mut *self.state.get() }
.allocate(layout)
.map(|subbuffer| unsafe { subbuffer.reinterpret_unchecked() })
}
/// Allocates a subbuffer for a slice.
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
/// - Panics if `len` is zero.
pub fn allocate_slice<T>(
&self,
len: DeviceSize,
) -> Result<Subbuffer<[T]>, AllocationCreationError> {
let layout =
DeviceLayout::from_layout(Layout::array::<T>(len.try_into().unwrap()).unwrap())
.expect("can't allocate memory for zero-sized types");
) -> Result<Subbuffer<[T]>, AllocationCreationError>
where
T: BufferContents,
{
self.allocate_unsized(len)
}
self.allocate(layout)
.map(|subbuffer| unsafe { subbuffer.reinterpret() })
/// Allocates a subbuffer for unsized data.
///
/// # Panics
///
/// - Panics if `len` is zero.
pub fn allocate_unsized<T>(
&self,
len: DeviceSize,
) -> Result<Subbuffer<T>, AllocationCreationError>
where
T: BufferContents + ?Sized,
{
let len = NonZeroDeviceSize::new(len).expect("empty slices are not valid buffer contents");
let layout = T::LAYOUT.layout_for_len(len).unwrap();
unsafe { &mut *self.state.get() }
.allocate(layout)
.map(|subbuffer| unsafe { subbuffer.reinterpret_unchecked() })
}
/// Allocates a subbuffer with the given `layout`.
@ -239,11 +252,7 @@ where
) -> Result<Subbuffer<[u8]>, AllocationCreationError> {
assert!(layout.alignment().as_devicesize() <= 64);
let state = unsafe { &mut *self.state.get() };
let offset = state.allocate(layout)?;
let arena = state.arena.as_ref().unwrap().clone();
Ok(Subbuffer::from_arena(arena, offset, layout.size()))
unsafe { &mut *self.state.get() }.allocate(layout)
}
}
@ -277,7 +286,10 @@ impl<A> SubbufferAllocatorState<A>
where
A: MemoryAllocator,
{
fn allocate(&mut self, layout: DeviceLayout) -> Result<DeviceSize, AllocationCreationError> {
fn allocate(
&mut self,
layout: DeviceLayout,
) -> Result<Subbuffer<[u8]>, AllocationCreationError> {
let size = layout.size();
let alignment = cmp::max(layout.alignment(), self.buffer_alignment);
@ -310,7 +322,7 @@ where
let offset = offset - arena_offset;
self.free_start = offset + size;
return Ok(offset);
return Ok(Subbuffer::from_arena(arena.clone(), offset, layout.size()));
}
// We reached the end of the arena, grab the next one.
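
For illustration, a minimal sketch of the new allocation API; it assumes `SubbufferAllocator` is generic over its memory allocator `A: MemoryAllocator`, as `SubbufferAllocatorState<A>` above suggests, and the `Uniforms` type is made up:

use vulkano::{
    buffer::{allocator::SubbufferAllocator, BufferContents, Subbuffer},
    memory::allocator::{AllocationCreationError, MemoryAllocator},
};

#[derive(BufferContents)]
#[repr(C)]
struct Uniforms {
    view: [[f32; 4]; 4],
    time: f32,
}

// Grabs one correctly sized and aligned subbuffer per frame from the ring allocator; the
// layout comes from `Uniforms::LAYOUT`, so no zero-size or over-alignment panics are possible.
fn next_uniforms<A>(
    allocator: &SubbufferAllocator<A>,
) -> Result<Subbuffer<Uniforms>, AllocationCreationError>
where
    A: MemoryAllocator,
{
    allocator.allocate_sized::<Uniforms>()
}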

View File

@ -27,8 +27,8 @@
//! `VkBuffer`, and as such doesn't hold onto any memory.
//! - [`Buffer`] is a `RawBuffer` with memory bound to it, and with state tracking.
//! - [`Subbuffer`] is what you will use most of the time, as it is what all the APIs expect. It is
//! reference to a portion of a `Buffer`. `Subbuffer` also has a type parameter, which is a hint
//! for how the data in the portion of the buffer is going to be interpreted.
//! a reference to a portion of a `Buffer`. `Subbuffer` also has a type parameter, which is a
//! hint for how the data in the portion of the buffer is going to be interpreted.
//!
//! # `Subbuffer` allocation
//!
@ -98,7 +98,10 @@
//! [the `view` module]: self::view
//! [the `shader` module documentation]: crate::shader
pub use self::{subbuffer::Subbuffer, usage::BufferUsage};
pub use self::{
subbuffer::{BufferContents, BufferContentsLayout, Subbuffer},
usage::BufferUsage,
};
use self::{
subbuffer::{ReadLockError, WriteLockError},
sys::{BufferCreateInfo, RawBuffer},
@ -119,15 +122,13 @@ use crate::{
DeviceSize, NonZeroDeviceSize, RequirementNotMet, RequiresOneOf, Version, VulkanError,
VulkanObject,
};
use bytemuck::{Pod, PodCastError};
use parking_lot::{Mutex, MutexGuard};
use smallvec::SmallVec;
use std::{
alloc::Layout,
error::Error,
fmt::{Display, Error as FmtError, Formatter},
hash::{Hash, Hasher},
mem::{size_of, size_of_val},
mem::size_of_val,
ops::Range,
ptr,
sync::Arc,
@ -282,8 +283,6 @@ impl Buffer {
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
/// - Panics if `iter` is empty.
pub fn from_iter<T, I>(
allocator: &(impl MemoryAllocator + ?Sized),
@ -291,7 +290,7 @@ impl Buffer {
iter: I,
) -> Result<Subbuffer<[T]>, BufferError>
where
[T]: BufferContents,
T: BufferContents,
I: IntoIterator<Item = T>,
I::IntoIter: ExactSizeIterator,
{
@ -307,20 +306,17 @@ impl Buffer {
/// Creates a new uninitialized `Buffer` for sized data. Returns a [`Subbuffer`] spanning the
/// whole buffer.
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
pub fn new_sized<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocate_info: BufferAllocateInfo,
) -> Result<Subbuffer<T>, BufferError> {
let layout = Layout::new::<T>()
.try_into()
.expect("can't allocate memory for zero-sized types");
) -> Result<Subbuffer<T>, BufferError>
where
T: BufferContents,
{
let layout = T::LAYOUT.unwrap_sized();
let buffer = Subbuffer::new(Buffer::new(allocator, allocate_info, layout)?);
Buffer::new(allocator, allocate_info, layout).map(Subbuffer::from_buffer)
Ok(unsafe { buffer.reinterpret_unchecked() })
}
/// Creates a new uninitialized `Buffer` for a slice. Returns a [`Subbuffer`] spanning the
@ -328,20 +324,37 @@ impl Buffer {
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
/// - Panics if `len` is zero.
pub fn new_slice<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocate_info: BufferAllocateInfo,
len: DeviceSize,
) -> Result<Subbuffer<[T]>, BufferError> {
let layout = Layout::array::<T>(len.try_into().unwrap())
.unwrap()
.try_into()
.expect("can't allocate memory for zero-sized types");
) -> Result<Subbuffer<[T]>, BufferError>
where
T: BufferContents,
{
Buffer::new_unsized(allocator, allocate_info, len)
}
Buffer::new(allocator, allocate_info, layout).map(Subbuffer::from_buffer)
/// Creates a new uninitialized `Buffer` for unsized data. Returns a [`Subbuffer`] spanning the
/// whole buffer.
///
/// # Panics
///
/// - Panics if `len` is zero.
pub fn new_unsized<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocate_info: BufferAllocateInfo,
len: DeviceSize,
) -> Result<Subbuffer<T>, BufferError>
where
T: BufferContents + ?Sized,
{
let len = NonZeroDeviceSize::new(len).expect("empty slices are not valid buffer contents");
let layout = T::LAYOUT.layout_for_len(len).unwrap();
let buffer = Subbuffer::new(Buffer::new(allocator, allocate_info, layout)?);
Ok(unsafe { buffer.reinterpret_unchecked() })
}
/// Creates a new uninitialized `Buffer` with the given `layout`.
@ -1096,75 +1109,6 @@ vulkan_bitflags! {
},*/
}
/// Trait for types of data that can be put in a buffer. These can be safely transmuted to and from
/// a slice of bytes.
pub unsafe trait BufferContents: Send + Sync + 'static {
/// Converts an immutable reference to `Self` to an immutable byte slice.
fn as_bytes(&self) -> &[u8];
/// Converts a mutable reference to `Self` to a mutable byte slice.
fn as_bytes_mut(&mut self) -> &mut [u8];
/// Converts an immutable byte slice into an immutable reference to `Self`.
fn from_bytes(bytes: &[u8]) -> Result<&Self, PodCastError>;
/// Converts a mutable byte slice into a mutable reference to `Self`.
fn from_bytes_mut(bytes: &mut [u8]) -> Result<&mut Self, PodCastError>;
/// Returns the size of an element of the type.
fn size_of_element() -> DeviceSize;
}
unsafe impl<T> BufferContents for T
where
T: Pod + Send + Sync,
{
fn as_bytes(&self) -> &[u8] {
bytemuck::bytes_of(self)
}
fn as_bytes_mut(&mut self) -> &mut [u8] {
bytemuck::bytes_of_mut(self)
}
fn from_bytes(bytes: &[u8]) -> Result<&T, PodCastError> {
bytemuck::try_from_bytes(bytes)
}
fn from_bytes_mut(bytes: &mut [u8]) -> Result<&mut T, PodCastError> {
bytemuck::try_from_bytes_mut(bytes)
}
fn size_of_element() -> DeviceSize {
1
}
}
unsafe impl<T> BufferContents for [T]
where
T: Pod + Send + Sync,
{
fn as_bytes(&self) -> &[u8] {
bytemuck::cast_slice(self)
}
fn as_bytes_mut(&mut self) -> &mut [u8] {
bytemuck::cast_slice_mut(self)
}
fn from_bytes(bytes: &[u8]) -> Result<&[T], PodCastError> {
bytemuck::try_cast_slice(bytes)
}
fn from_bytes_mut(bytes: &mut [u8]) -> Result<&mut [T], PodCastError> {
bytemuck::try_cast_slice_mut(bytes)
}
fn size_of_element() -> DeviceSize {
size_of::<T>() as DeviceSize
}
}
/// The buffer configuration to query in
/// [`PhysicalDevice::external_buffer_properties`](crate::device::physical::PhysicalDevice::external_buffer_properties).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
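
For illustration, a hedged sketch of the reworked `Buffer::from_iter`; `allocator` and `allocate_info` are assumed to be created elsewhere, and `MyVertex` is made up:

use vulkano::{
    buffer::{Buffer, BufferAllocateInfo, BufferContents, BufferError, Subbuffer},
    memory::allocator::StandardMemoryAllocator,
};

#[derive(BufferContents)]
#[repr(C)]
struct MyVertex {
    position: [f32; 3],
}

// Uploads the vertices into a new buffer; the element count is taken from the exact-size
// iterator, and the memory usage is controlled by `allocate_info`.
fn upload_vertices(
    allocator: &StandardMemoryAllocator,
    allocate_info: BufferAllocateInfo,
    vertices: Vec<MyVertex>,
) -> Result<Subbuffer<[MyVertex]>, BufferError> {
    Buffer::from_iter(allocator, allocate_info, vertices)
}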

View File

@ -7,9 +7,12 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use super::{allocator::Arena, Buffer, BufferContents, BufferError, BufferMemory};
//! A subpart of a buffer.
use super::{allocator::Arena, Buffer, BufferError, BufferMemory};
use crate::{
device::{Device, DeviceOwned},
macros::try_opt,
memory::{
self,
allocator::{align_down, align_up, DeviceAlignment, DeviceLayout},
@ -17,19 +20,23 @@ use crate::{
},
DeviceSize, NonZeroDeviceSize,
};
use bytemuck::PodCastError;
use bytemuck::{AnyBitPattern, PodCastError};
use std::{
alloc::Layout,
cmp,
error::Error,
ffi::c_void,
fmt::{Display, Error as FmtError, Formatter},
hash::{Hash, Hasher},
marker::PhantomData,
mem::{self, align_of, size_of},
ops::{Deref, DerefMut, Range, RangeBounds},
ptr::{self, NonNull},
sync::Arc,
};
pub use vulkano_macros::BufferContents;
/// A subpart of a buffer.
///
/// This type doesn't correspond to any Vulkan object, it exists for API convenience. Most Vulkan
@ -69,15 +76,6 @@ enum SubbufferParent {
}
impl<T: ?Sized> Subbuffer<T> {
pub(super) fn from_buffer(buffer: Arc<Buffer>) -> Self {
Subbuffer {
offset: 0,
size: buffer.size(),
parent: SubbufferParent::Buffer(buffer),
marker: PhantomData,
}
}
pub(super) fn from_arena(arena: Arc<Arena>, offset: DeviceSize, size: DeviceSize) -> Self {
Subbuffer {
offset,
@ -120,6 +118,20 @@ impl<T: ?Sized> Subbuffer<T> {
}
}
/// Returns the mapped pointer to the start of the subbuffer if the memory is host-visible,
/// otherwise returns [`None`].
pub fn mapped_ptr(&self) -> Option<NonNull<c_void>> {
match self.buffer().memory() {
BufferMemory::Normal(a) => a.mapped_ptr().map(|ptr| {
// SAFETY: The original address came from the Vulkan implementation, and allocation
// sizes are guaranteed to not exceed `isize::MAX` when there's a mapped pointer,
// so the offset better be in range.
unsafe { NonNull::new_unchecked(ptr.as_ptr().add(self.offset as usize)) }
}),
BufferMemory::Sparse => unreachable!(),
}
}
/// Returns the device address for this subbuffer.
pub fn device_address(&self) -> Result<NonZeroDeviceSize, BufferError> {
self.buffer().device_address().map(|ptr| {
@ -130,46 +142,31 @@ impl<T: ?Sized> Subbuffer<T> {
})
}
/// Changes the `T` generic parameter of the subbuffer to the desired type.
///
/// You should **always** prefer the safe functions [`try_from_bytes`], [`into_bytes`],
/// [`try_cast`], [`try_cast_slice`] or [`into_slice`].
///
/// # Safety
///
/// - Correct offset and size must be ensured before using this `Subbuffer` on the device.
///
/// [`try_from_bytes`]: Self::try_from_bytes
/// [`into_bytes`]: Self::into_bytes
/// [`try_cast`]: Self::try_cast
/// [`try_cast_slice`]: Self::try_cast_slice
/// [`into_slice`]: Self::into_slice
pub unsafe fn reinterpret<U: ?Sized>(self) -> Subbuffer<U> {
// SAFETY: All `Subbuffer`s share the same layout.
mem::transmute::<Subbuffer<T>, Subbuffer<U>>(self)
}
/// Same as [`reinterpret`], except it works with a reference to the subbuffer.
///
/// [`reinterpret`]: Self::reinterpret
pub unsafe fn reinterpret_ref<U: ?Sized>(&self) -> &Subbuffer<U> {
assert!(size_of::<Subbuffer<T>>() == size_of::<Subbuffer<U>>());
assert!(align_of::<Subbuffer<T>>() == align_of::<Subbuffer<U>>());
// SAFETY: All `Subbuffer`s share the same layout.
mem::transmute::<&Subbuffer<T>, &Subbuffer<U>>(self)
}
/// Casts the subbuffer to a slice of raw bytes.
pub fn into_bytes(self) -> Subbuffer<[u8]> {
unsafe { self.reinterpret() }
unsafe { self.reinterpret_unchecked_inner() }
}
/// Same as [`into_bytes`], except it works with a reference to the subbuffer.
///
/// [`into_bytes`]: Self::into_bytes
pub fn as_bytes(&self) -> &Subbuffer<[u8]> {
unsafe { self.reinterpret_ref() }
unsafe { self.reinterpret_ref_unchecked_inner() }
}
#[inline(always)]
unsafe fn reinterpret_unchecked_inner<U: ?Sized>(self) -> Subbuffer<U> {
// SAFETY: All `Subbuffer`s share the same layout.
mem::transmute::<Subbuffer<T>, Subbuffer<U>>(self)
}
#[inline(always)]
unsafe fn reinterpret_ref_unchecked_inner<U: ?Sized>(&self) -> &Subbuffer<U> {
assert!(size_of::<Subbuffer<T>>() == size_of::<Subbuffer<U>>());
assert!(align_of::<Subbuffer<T>>() == align_of::<Subbuffer<U>>());
// SAFETY: All `Subbuffer`s share the same layout.
mem::transmute::<&Subbuffer<T>, &Subbuffer<U>>(self)
}
}
@ -177,6 +174,53 @@ impl<T> Subbuffer<T>
where
T: BufferContents + ?Sized,
{
/// Changes the `T` generic parameter of the subbuffer to the desired type without checking if
/// the contents are correctly aligned and sized.
///
/// **NEVER use this function** unless you absolutely have to, and even then, open an issue on
/// GitHub instead. **An unaligned / incorrectly sized subbuffer is undefined behavior _both on
/// the Rust and the Vulkan side!_**
///
/// # Safety
///
/// - `self.memory_offset()` must be properly aligned for `U`.
/// - `self.size()` must be valid for `U`, which means:
/// - If `U` is sized, the size must match exactly.
/// - If `U` is unsized, then the subbuffer size minus the size of the head (sized part) of
/// the DST must be evenly divisible by the size of the element type.
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn reinterpret_unchecked<U>(self) -> Subbuffer<U>
where
U: BufferContents + ?Sized,
{
let element_size = U::LAYOUT.element_size().unwrap_or(1);
debug_assert!(is_aligned(self.memory_offset(), U::LAYOUT.alignment()));
debug_assert!(self.size >= U::LAYOUT.head_size());
debug_assert!((self.size - U::LAYOUT.head_size()) % element_size == 0);
self.reinterpret_unchecked_inner()
}
/// Same as [`reinterpret_unchecked`], except it works with a reference to the subbuffer.
///
/// # Safety
///
/// Please read the safety docs on [`reinterpret_unchecked`] carefully.
///
/// [`reinterpret_unchecked`]: Self::reinterpret_unchecked
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn reinterpret_ref_unchecked<U>(&self) -> &Subbuffer<U>
where
U: BufferContents + ?Sized,
{
let element_size = U::LAYOUT.element_size().unwrap_or(1);
debug_assert!(is_aligned(self.memory_offset(), U::LAYOUT.alignment()));
debug_assert!(self.size >= U::LAYOUT.head_size());
debug_assert!((self.size - U::LAYOUT.head_size()) % element_size == 0);
self.reinterpret_ref_unchecked_inner()
}
/// Locks the subbuffer in order to read its content from the host.
///
/// If the subbuffer is currently used in exclusive mode by the device, this function will
@ -209,9 +253,7 @@ where
BufferMemory::Sparse => todo!("`Subbuffer::read` doesn't support sparse binding yet"),
};
let range = self.range();
let aligned_range = if let Some(atom_size) = allocation.atom_size() {
let range = if let Some(atom_size) = allocation.atom_size() {
// This works because the suballocators align allocations to the non-coherent atom size
// when the memory is host-visible but not host-coherent.
let start = align_down(self.offset, atom_size);
@ -222,12 +264,12 @@ where
Range { start, end }
} else {
range.clone()
self.range()
};
let mut state = self.buffer().state();
state.check_cpu_read(aligned_range.clone())?;
unsafe { state.cpu_read_lock(aligned_range.clone()) };
state.check_cpu_read(range.clone())?;
unsafe { state.cpu_read_lock(range.clone()) };
if allocation.atom_size().is_some() {
// If there are other read locks being held at this point, they also called
@ -235,16 +277,17 @@ where
// lock, so there will be no new data and this call will do nothing.
// TODO: probably still more efficient to call it only if we're the first to acquire a
// read lock, but the number of CPU locks isn't currently tracked anywhere.
unsafe { allocation.invalidate_range(aligned_range.clone()) }?;
unsafe { allocation.invalidate_range(range.clone()) }?;
}
let bytes = unsafe { allocation.read(range) }.ok_or(BufferError::MemoryNotHostVisible)?;
let data = T::from_bytes(bytes).unwrap();
let mapped_ptr = self.mapped_ptr().ok_or(BufferError::MemoryNotHostVisible)?;
// SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
let data = unsafe { &*T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) };
Ok(BufferReadGuard {
subbuffer: self,
data,
range: aligned_range,
range,
})
}
@ -279,9 +322,7 @@ where
BufferMemory::Sparse => todo!("`Subbuffer::write` doesn't support sparse binding yet"),
};
let range = self.range();
let aligned_range = if let Some(atom_size) = allocation.atom_size() {
let range = if let Some(atom_size) = allocation.atom_size() {
// This works because the suballocators align allocations to the non-coherent atom size
// when the memory is host-visible but not host-coherent.
let start = align_down(self.offset, atom_size);
@ -292,24 +333,25 @@ where
Range { start, end }
} else {
range.clone()
self.range()
};
let mut state = self.buffer().state();
state.check_cpu_write(aligned_range.clone())?;
unsafe { state.cpu_write_lock(aligned_range.clone()) };
state.check_cpu_write(range.clone())?;
unsafe { state.cpu_write_lock(range.clone()) };
if allocation.atom_size().is_some() {
unsafe { allocation.invalidate_range(aligned_range.clone()) }?;
unsafe { allocation.invalidate_range(range.clone()) }?;
}
let bytes = unsafe { allocation.write(range) }.ok_or(BufferError::MemoryNotHostVisible)?;
let data = T::from_bytes_mut(bytes).unwrap();
let mapped_ptr = self.mapped_ptr().ok_or(BufferError::MemoryNotHostVisible)?;
// SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
let data = unsafe { &mut *T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) };
Ok(BufferWriteGuard {
subbuffer: self,
data,
range: aligned_range,
range,
})
}
}
@ -317,7 +359,14 @@ where
impl<T> Subbuffer<T> {
/// Converts the subbuffer to a slice of one element.
pub fn into_slice(self) -> Subbuffer<[T]> {
unsafe { self.reinterpret() }
unsafe { self.reinterpret_unchecked_inner() }
}
/// Same as [`into_slice`], except it works with a reference to the subbuffer.
///
/// [`into_slice`]: Self::into_slice
pub fn as_slice(&self) -> &Subbuffer<[T]> {
unsafe { self.reinterpret_ref_unchecked_inner() }
}
}
@ -326,35 +375,21 @@ where
T: BufferContents,
{
/// Tries to cast a subbuffer of raw bytes to a `Subbuffer<T>`.
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
pub fn try_from_bytes(subbuffer: Subbuffer<[u8]>) -> Result<Self, PodCastError> {
assert_valid_type_param::<T>();
if subbuffer.size() != size_of::<T>() as DeviceSize {
Err(PodCastError::SizeMismatch)
} else if !is_aligned(subbuffer.memory_offset(), DeviceAlignment::of::<T>()) {
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { subbuffer.reinterpret() })
Ok(unsafe { subbuffer.reinterpret_unchecked() })
}
}
/// Tries to cast the subbuffer to a different type.
///
/// # Panics
///
/// - Panics if `U` has zero size.
/// - Panics if `U` has an alignment greater than `64`.
pub fn try_cast<U>(self) -> Result<Subbuffer<U>, PodCastError>
where
U: BufferContents,
{
assert_valid_type_param::<U>();
if size_of::<U>() != size_of::<T>() {
Err(PodCastError::SizeMismatch)
} else if align_of::<U>() > align_of::<T>()
@ -362,7 +397,7 @@ where
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { self.reinterpret() })
Ok(unsafe { self.reinterpret_unchecked() })
}
}
}
@ -370,9 +405,7 @@ where
impl<T> Subbuffer<[T]> {
/// Returns the number of elements in the slice.
pub fn len(&self) -> DeviceSize {
assert_valid_type_param::<T>();
debug_assert!(self.size() % size_of::<T>() as DeviceSize == 0);
debug_assert!(self.size % size_of::<T>() as DeviceSize == 0);
self.size / size_of::<T>() as DeviceSize
}
@ -381,7 +414,7 @@ impl<T> Subbuffer<[T]> {
///
/// # Panics
///
/// - Panics if `index` is out of range.
/// - Panics if `index` is out of bounds.
pub fn index(self, index: DeviceSize) -> Subbuffer<T> {
assert!(index <= self.len());
@ -403,11 +436,13 @@ impl<T> Subbuffer<[T]> {
/// # Panics
///
/// - Panics if `range` is out of bounds.
/// - Panics if `range` is empty.
pub fn slice(mut self, range: impl RangeBounds<DeviceSize>) -> Subbuffer<[T]> {
let Range { start, end } = memory::range(range, ..self.len()).unwrap();
self.offset += start * size_of::<T>() as DeviceSize;
self.size = (end - start) * size_of::<T>() as DeviceSize;
assert!(self.size != 0);
self
}
@ -418,6 +453,7 @@ impl<T> Subbuffer<[T]> {
self.offset += start * size_of::<T>() as DeviceSize;
self.size = (end - start) * size_of::<T>() as DeviceSize;
debug_assert!(self.size != 0);
self
}
@ -426,9 +462,10 @@ impl<T> Subbuffer<[T]> {
///
/// # Panics
///
/// - Panics if `mid` is out of bounds.
/// - Panics if `mid` is not greater than `0`.
/// - Panics if `mid` is not less than `self.len()`.
pub fn split_at(self, mid: DeviceSize) -> (Subbuffer<[T]>, Subbuffer<[T]>) {
assert!(mid <= self.len());
assert!(0 < mid && mid < self.len());
unsafe { self.split_at_unchecked(mid) }
}
@ -443,6 +480,17 @@ impl<T> Subbuffer<[T]> {
}
impl Subbuffer<[u8]> {
/// Creates a new `Subbuffer<[u8]>` spanning the whole buffer.
#[inline]
pub fn new(buffer: Arc<Buffer>) -> Self {
Subbuffer {
offset: 0,
size: buffer.size(),
parent: SubbufferParent::Buffer(buffer),
marker: PhantomData,
}
}
/// Casts the slice to a different element type while ensuring correct alignment for the type.
///
/// The offset of the subbuffer is rounded up to the alignment of `T` and the size adjusted for
@ -451,13 +499,14 @@ impl Subbuffer<[u8]> {
/// # Panics
///
/// - Panics if the aligned offset would be out of bounds.
/// - Panics if `T` has zero size.
/// - Panics if `T` has an alignment greater than `64`.
pub fn cast_aligned<T>(self) -> Subbuffer<[T]> {
pub fn cast_aligned<T>(self) -> Subbuffer<[T]>
where
T: BufferContents,
{
let layout = DeviceLayout::from_layout(Layout::new::<T>()).unwrap();
let aligned = self.align_to(layout);
unsafe { aligned.reinterpret() }
unsafe { aligned.reinterpret_unchecked() }
}
/// Aligns the subbuffer to the given `layout` by rounding the offset up to
@ -468,6 +517,7 @@ impl Subbuffer<[u8]> {
///
/// - Panics if the aligned offset would be out of bounds.
/// - Panics if `layout.alignment()` exceeds `64`.
#[inline]
pub fn align_to(mut self, layout: DeviceLayout) -> Subbuffer<[u8]> {
assert!(layout.alignment().as_devicesize() <= 64);
@ -484,20 +534,13 @@ impl Subbuffer<[u8]> {
impl<T> Subbuffer<[T]>
where
[T]: BufferContents,
T: BufferContents,
{
/// Tries to cast the slice to a different element type.
///
/// # Panics
///
/// - Panics if `U` has zero size.
/// - Panics if `U` has an alignment greater than `64`.
pub fn try_cast_slice<U>(self) -> Result<Subbuffer<[U]>, PodCastError>
where
[U]: BufferContents,
U: BufferContents,
{
assert_valid_type_param::<U>();
if size_of::<U>() != size_of::<T>() && self.size() % size_of::<U>() as DeviceSize != 0 {
Err(PodCastError::OutputSliceWouldHaveSlop)
} else if align_of::<U>() > align_of::<T>()
@ -505,21 +548,15 @@ where
{
Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
} else {
Ok(unsafe { self.reinterpret() })
Ok(unsafe { self.reinterpret_unchecked() })
}
}
}
#[inline(always)]
fn assert_valid_type_param<T>() {
assert!(size_of::<T>() != 0);
assert!(align_of::<T>() <= 64);
}
impl From<Arc<Buffer>> for Subbuffer<[u8]> {
#[inline]
fn from(buffer: Arc<Buffer>) -> Self {
Self::from_buffer(buffer)
Self::new(buffer)
}
}
@ -675,6 +712,425 @@ impl Display for WriteLockError {
}
}
/// Trait for types of data that can be put in a buffer.
///
/// This trait is not intended to be implemented manually (ever), and attempting to do so will make
/// you one sad individual very quickly. Rather, you should use [the derive macro]. Note also that there
/// are blanket implementations of this trait: you don't need to implement it if the type in
/// question already implements bytemuck's [`AnyBitPattern`]. Most if not all linear algebra crates
/// have a feature flag that you can enable for bytemuck support. The trait is also already
/// implemented for all slices where the element type implements `BufferContents`.
///
/// # Examples
///
/// Deriving the trait for sized types:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: f32,
/// y: f32,
/// array: [i32; 12],
/// }
/// ```
///
/// Deriving the trait for unsized types works the same:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: f32,
/// y: f32,
/// slice: [i32],
/// }
/// ```
///
/// This even works if the last field is a user-defined DST too:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: f32,
/// y: f32,
/// other: OtherData,
/// }
///
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct OtherData {
/// slice: [i32],
/// }
/// ```
///
/// You can also use generics if you please:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData<T, U> {
/// x: T,
/// y: T,
/// slice: [U],
/// }
/// ```
///
/// This even works with dependently-sized types:
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData<T>
/// where
/// T: ?Sized,
/// {
/// x: f32,
/// y: f32,
/// z: T,
/// }
/// ```
///
/// [the derive macro]: vulkano_macros::BufferContents
//
// If you absolutely *must* implement this trait by hand, here are the safety requirements (but
// please open an issue on GitHub instead):
//
// - The type must be a struct and all fields must implement `BufferContents`.
// - `LAYOUT` must be the correct layout for the type, which also means the type must either be
// sized, or, if it's unsized, its metadata must be the same as that of a slice. Implementing
// `BufferContents` for any other kind of DST is instant, horrific undefined behavior.
// - `from_ffi` must create a pointer with the same address as the `data` parameter that is passed
// in. The pointer is expected to be aligned properly already.
// - `from_ffi` must create a pointer that is expected to be valid for reads (and potentially
// writes) for exactly `range` bytes. The `data` and `range` are expected to be valid for the
// `LAYOUT`.
pub unsafe trait BufferContents: Send + Sync + 'static {
/// The layout of the contents.
const LAYOUT: BufferContentsLayout;
/// Creates a pointer to `Self` from a pointer to the start of the data and a range in bytes.
///
/// # Safety
///
/// - If `Self` is sized, then `range` must match the size exactly.
/// - If `Self` is unsized, then the `range` minus the size of the head (sized part) of the DST
/// must be evenly divisible by the size of the element type.
#[doc(hidden)]
unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self;
}
unsafe impl<T> BufferContents for T
where
T: AnyBitPattern + Send + Sync,
{
const LAYOUT: BufferContentsLayout =
if let Some(layout) = BufferContentsLayout::from_sized(Layout::new::<T>()) {
layout
} else {
panic!("zero-sized types are not valid buffer contents");
};
#[inline(always)]
unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
debug_assert!(range == size_of::<T>());
debug_assert!(data as usize % align_of::<T>() == 0);
data.cast()
}
}
unsafe impl<T> BufferContents for [T]
where
T: BufferContents,
{
const LAYOUT: BufferContentsLayout = BufferContentsLayout(BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout: T::LAYOUT.unwrap_sized(),
});
#[inline(always)]
unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
debug_assert!(range % size_of::<T>() == 0);
debug_assert!(data as usize % align_of::<T>() == 0);
let len = range / size_of::<T>();
ptr::slice_from_raw_parts_mut(data.cast(), len)
}
}
/// Describes the layout required for a type so that it can be read from/written to a buffer. This
/// is used to allocate (sub)buffers generically.
///
/// This is similar to [`DeviceLayout`] except that this exists for the sole purpose of describing
/// the layout of buffer contents specifically. This means, for example, that the sizedness of the
/// type is captured, as well as the layout of the head and tail if the layout is for unsized data,
/// in order to be able to represent everything that Vulkan can stuff in a buffer.
///
/// `BufferContentsLayout` also has an additional invariant compared to `DeviceLayout`: the
/// alignment of the data must not exceed `64`. This is because `64` is the minimum alignment that
/// every `DeviceMemory` block is guaranteed to have, so any greater alignment can't be guaranteed.
/// Other than that, the invariant that sizes must be non-zero applies here as
/// well, for both sized data and the element type of unsized data.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BufferContentsLayout(BufferContentsLayoutInner);
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum BufferContentsLayoutInner {
Sized(DeviceLayout),
Unsized {
head_layout: Option<DeviceLayout>,
element_layout: DeviceLayout,
},
}
impl BufferContentsLayout {
/// Returns the size of the head (sized part). If the data has no sized part, then this will
/// return 0.
#[inline]
pub const fn head_size(&self) -> DeviceSize {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => sized.size(),
BufferContentsLayoutInner::Unsized {
head_layout: None, ..
} => 0,
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
..
} => head_layout.size(),
}
}
/// Returns the size of the element type if the data is unsized, or returns [`None`].
/// Guaranteed to be non-zero.
#[inline]
pub const fn element_size(&self) -> Option<DeviceSize> {
match &self.0 {
BufferContentsLayoutInner::Sized(_) => None,
BufferContentsLayoutInner::Unsized { element_layout, .. } => {
Some(element_layout.size())
}
}
}
/// Returns the alignment required for the data. Guaranteed to not exceed `64`.
#[inline]
pub const fn alignment(&self) -> DeviceAlignment {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => sized.alignment(),
BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout,
} => element_layout.alignment(),
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
..
} => head_layout.alignment(),
}
}
/// Returns the [`DeviceLayout`] for the data for the given `len`, or returns [`None`] on
/// arithmetic overflow or if the total size would exceed [`DeviceLayout::MAX_SIZE`].
#[inline]
pub const fn layout_for_len(&self, len: NonZeroDeviceSize) -> Option<DeviceLayout> {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => Some(*sized),
BufferContentsLayoutInner::Unsized {
head_layout,
element_layout,
} => {
let (tail_layout, _) = try_opt!(element_layout.repeat(len));
if let Some(head_layout) = head_layout {
let (layout, _) = try_opt!(head_layout.extend(tail_layout));
Some(layout.pad_to_alignment())
} else {
Some(tail_layout)
}
}
}
}
/// Creates a new `BufferContentsLayout` from a sized layout. This is intended for use by the
/// derive macro only.
#[doc(hidden)]
#[inline]
pub const fn from_sized(sized: Layout) -> Option<Self> {
assert!(
sized.align() <= 64,
"types with alignments above 64 are not valid buffer contents",
);
if let Ok(sized) = DeviceLayout::from_layout(sized) {
Some(Self(BufferContentsLayoutInner::Sized(sized)))
} else {
None
}
}
/// Creates a new `BufferContentsLayout` from a head and element layout. This is intended for
/// use by the derive macro only.
#[doc(hidden)]
#[inline]
pub const fn from_head_element_layout(
head_layout: Layout,
element_layout: Layout,
) -> Option<Self> {
if head_layout.align() > 64 || element_layout.align() > 64 {
panic!("types with alignments above 64 are not valid buffer contents");
}
// The head of a `BufferContentsLayout` can be zero-sized.
// TODO: Replace with `Result::ok` once its constness is stabilized.
let head_layout = if let Ok(head_layout) = DeviceLayout::from_layout(head_layout) {
Some(head_layout)
} else {
None
};
if let Ok(element_layout) = DeviceLayout::from_layout(element_layout) {
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout,
element_layout,
}))
} else {
None
}
}
/// Extends the given `previous` [`Layout`] by `self`. This is intended for use by the derive
/// macro only.
#[doc(hidden)]
#[inline]
pub const fn extend_from_layout(self, previous: &Layout) -> Option<Self> {
assert!(
previous.align() <= 64,
"types with alignments above 64 are not valid buffer contents",
);
match self.0 {
BufferContentsLayoutInner::Sized(sized) => {
let (sized, _) = try_opt!(sized.extend_from_layout(previous));
Some(Self(BufferContentsLayoutInner::Sized(sized)))
}
BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout,
} => {
// The head of a `BufferContentsLayout` can be zero-sized.
// TODO: Replace with `Result::ok` once its constness is stabilized.
let head_layout = if let Ok(head_layout) = DeviceLayout::from_layout(*previous) {
Some(head_layout)
} else {
None
};
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout,
element_layout,
}))
}
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout,
} => {
let (head_layout, _) = try_opt!(head_layout.extend_from_layout(previous));
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout,
}))
}
}
}
/// Creates a new `BufferContentsLayout` by rounding up the size of the head to the nearest
/// multiple of its alignment if the layout is sized, or by rounding up the size of the head to
/// the nearest multiple of the alignment of the element type and aligning the head to the
/// alignment of the element type if there is a sized part. Doesn't do anything if there is no
/// sized part. Returns [`None`] if the new head size would exceed [`DeviceLayout::MAX_SIZE`].
/// This is intended for use by the derive macro only.
#[doc(hidden)]
#[inline]
pub const fn pad_to_alignment(&self) -> Option<Self> {
match &self.0 {
BufferContentsLayoutInner::Sized(sized) => Some(Self(
BufferContentsLayoutInner::Sized(sized.pad_to_alignment()),
)),
BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout,
} => Some(Self(BufferContentsLayoutInner::Unsized {
head_layout: None,
element_layout: *element_layout,
})),
BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout,
} => {
// We must pad the head to the alignment of the element type, *not* the alignment
// of the head.
//
// Consider a head layout of `(u8, u8, u8)` and an element layout of `u32`. If we
// padded the head to its own alignment, like is the case for sized layouts, it
// wouldn't change the size. Yet there is padding between the head and the first
// element of the slice.
//
// The reverse is true: consider a head layout of `(u16, u8)` and an element layout
// of `u8`. If we padded the head to its own alignment, it would be too large.
let padded_head_size =
head_layout.size() + head_layout.padding_needed_for(element_layout.alignment());
// SAFETY: `BufferContentsLayout`'s invariant guarantees that the alignment of the
// element type doesn't exceed 64, which together with the overflow invariant of
// `DeviceLayout` means that this can't overflow.
let padded_head_size =
unsafe { NonZeroDeviceSize::new_unchecked(padded_head_size) };
// We have to align the head to the alignment of the element type, so that the
// struct as a whole is aligned correctly when a different struct is extended with
// this one.
//
// Note that this is *not* the same as aligning the head to the alignment of the
// element type and then padding the layout to its alignment. Consider the same
// layout from above, with a head layout of `(u16, u8)` and an element layout of
// `u8`. If we aligned the head to the element type and then padded it to its own
// alignment, we would get the same wrong result as above. This instead ensures the
// head is padded to the element and aligned to it, without the alignment of the
// head interfering.
let alignment =
DeviceAlignment::max(head_layout.alignment(), element_layout.alignment());
if let Some(head_layout) = DeviceLayout::new(padded_head_size, alignment) {
Some(Self(BufferContentsLayoutInner::Unsized {
head_layout: Some(head_layout),
element_layout: *element_layout,
}))
} else {
None
}
}
}
}
pub(super) const fn unwrap_sized(self) -> DeviceLayout {
match self.0 {
BufferContentsLayoutInner::Sized(sized) => sized,
BufferContentsLayoutInner::Unsized { .. } => {
panic!("called `BufferContentsLayout::unwrap_sized` on an unsized layout");
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -692,6 +1148,68 @@ mod tests {
},
};
#[test]
fn derive_buffer_contents() {
#[derive(BufferContents)]
#[repr(C)]
struct Test1(u32, u64, u8);
assert_eq!(Test1::LAYOUT.head_size() as usize, size_of::<Test1>());
assert_eq!(Test1::LAYOUT.element_size(), None);
assert_eq!(
Test1::LAYOUT.alignment().as_devicesize() as usize,
align_of::<Test1>(),
);
#[derive(BufferContents)]
#[repr(C)]
struct Composite1(Test1, [f32; 10], Test1);
assert_eq!(
Composite1::LAYOUT.head_size() as usize,
size_of::<Composite1>(),
);
assert_eq!(Composite1::LAYOUT.element_size(), None);
assert_eq!(
Composite1::LAYOUT.alignment().as_devicesize() as usize,
align_of::<Composite1>(),
);
#[derive(BufferContents)]
#[repr(C)]
struct Test2(u64, u8, [u32]);
assert_eq!(
Test2::LAYOUT.head_size() as usize,
size_of::<u64>() + size_of::<u32>(),
);
assert_eq!(
Test2::LAYOUT.element_size().unwrap() as usize,
size_of::<u32>(),
);
assert_eq!(
Test2::LAYOUT.alignment().as_devicesize() as usize,
align_of::<u64>(),
);
#[derive(BufferContents)]
#[repr(C)]
struct Composite2(Test1, [f32; 10], Test2);
assert_eq!(
Composite2::LAYOUT.head_size() as usize,
size_of::<Test1>() + size_of::<[f32; 10]>() + size_of::<u64>() + size_of::<u32>(),
);
assert_eq!(
Composite2::LAYOUT.element_size().unwrap() as usize,
size_of::<u32>(),
);
assert_eq!(
Composite2::LAYOUT.alignment().as_devicesize() as usize,
align_of::<u64>(),
);
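// An extra check (a sketch, not in the original test): `layout_for_len` combines the padded
// head of `Test2` with `len` slice elements and keeps the head's alignment.
let len = NonZeroDeviceSize::new(3).unwrap();
let layout = Test2::LAYOUT.layout_for_len(len).unwrap();
assert_eq!(
    layout.size() as usize,
    size_of::<u64>() + size_of::<u32>() + 3 * size_of::<u32>(),
);
assert_eq!(layout.alignment().as_devicesize() as usize, align_of::<u64>());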
}
#[test]
fn split_at() {
let (device, _) = gfx_dev_and_queue!();
@ -714,13 +1232,17 @@ mod tests {
}
{
let (left, right) = buffer.clone().split_at(6);
assert!(left.len() == 6);
assert!(right.len() == 0);
let (left, right) = buffer.clone().split_at(5);
assert!(left.len() == 5);
assert!(right.len() == 1);
}
{
assert_should_panic!({ buffer.split_at(7) });
assert_should_panic!({ buffer.clone().split_at(0) });
}
{
assert_should_panic!({ buffer.split_at(6) });
}
}
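
For illustration, a minimal host-access sketch built on the APIs in this file; it assumes the subbuffer's memory is host-visible and that the write guard dereferences to the slice:

use vulkano::buffer::Subbuffer;

// Doubles every element through the host mapping; `write` takes the CPU lock and handles the
// non-coherent-atom alignment shown above, and the lock is released when the guard is dropped.
fn double_in_place(subbuffer: &Subbuffer<[u32]>) {
    let mut guard = subbuffer
        .write()
        .expect("memory is host-visible and the subbuffer is not in use by the device");
    for value in guard.iter_mut() {
        *value *= 2;
    }
}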

View File

@ -1197,7 +1197,7 @@ impl UnsafeCommandBufferBuilder {
stages.into(),
offset,
size,
data.as_bytes().as_ptr() as *const _,
data as *const _ as *const _,
);
}

View File

@ -400,7 +400,7 @@ where
assert_eq!(device, dst_buffer.device());
// VUID-vkCmdFillBuffer-size-00026
assert!(dst_buffer.size() != 0);
// Guaranteed by `Subbuffer`
// VUID-vkCmdFillBuffer-dstBuffer-00029
if !dst_buffer
@ -438,7 +438,10 @@ where
D: BufferContents + ?Sized,
Dd: SafeDeref<Target = D> + Send + Sync + 'static,
{
self.validate_update_buffer(dst_buffer.as_bytes(), data.deref().as_bytes())?;
self.validate_update_buffer(
dst_buffer.as_bytes(),
size_of_val(data.deref()) as DeviceSize,
)?;
unsafe {
self.inner.update_buffer(dst_buffer, data)?;
@ -450,7 +453,7 @@ where
fn validate_update_buffer(
&self,
dst_buffer: &Subbuffer<[u8]>,
data: &[u8],
data_size: DeviceSize,
) -> Result<(), ClearError> {
let device = self.device();
@ -473,7 +476,7 @@ where
assert_eq!(device, dst_buffer.device());
// VUID-vkCmdUpdateBuffer-dataSize-arraylength
assert!(size_of_val(data) != 0);
assert!(data_size != 0);
// VUID-vkCmdUpdateBuffer-dstBuffer-00034
if !dst_buffer
@ -488,10 +491,10 @@ where
// VUID-vkCmdUpdateBuffer-dstOffset-00032
// VUID-vkCmdUpdateBuffer-dataSize-00033
if size_of_val(data) as DeviceSize > dst_buffer.size() {
if data_size > dst_buffer.size() {
return Err(ClearError::RegionOutOfBufferBounds {
region_index: 0,
offset_range_end: size_of_val(data) as DeviceSize,
offset_range_end: data_size,
buffer_size: dst_buffer.size(),
});
}
@ -506,18 +509,18 @@ where
}
// VUID-vkCmdUpdateBuffer-dataSize-00037
if size_of_val(data) > 65536 {
if data_size > 65536 {
return Err(ClearError::DataTooLarge {
size: size_of_val(data) as DeviceSize,
size: data_size,
max: 65536,
});
}
// VUID-vkCmdUpdateBuffer-dataSize-00038
if size_of_val(data) % 4 != 0 {
if data_size % 4 != 0 {
return Err(ClearError::SizeNotAlignedForBuffer {
region_index: 0,
size: size_of_val(data) as DeviceSize,
size: data_size,
required_alignment: 4,
});
}
@ -736,12 +739,12 @@ impl SyncCommandBufferBuilder {
D: BufferContents + ?Sized,
Dd: SafeDeref<Target = D> + Send + Sync + 'static,
{
struct Cmd<Dd> {
dst_buffer: Subbuffer<[u8]>,
struct Cmd<D: ?Sized, Dd> {
dst_buffer: Subbuffer<D>,
data: Dd,
}
impl<D, Dd> Command for Cmd<Dd>
impl<D, Dd> Command for Cmd<D, Dd>
where
D: BufferContents + ?Sized,
Dd: SafeDeref<Target = D> + Send + Sync + 'static,
@ -751,11 +754,10 @@ impl SyncCommandBufferBuilder {
}
unsafe fn send(&self, out: &mut UnsafeCommandBufferBuilder) {
out.update_buffer(&self.dst_buffer, self.data.deref().as_bytes());
out.update_buffer(&self.dst_buffer, self.data.deref());
}
}
let dst_buffer = dst_buffer.into_bytes();
let command_index = self.commands.len();
let command_name = "update_buffer";
let resources = [(
@ -766,7 +768,7 @@ impl SyncCommandBufferBuilder {
secondary_use_ref: None,
},
Resource::Buffer {
buffer: dst_buffer.clone(),
buffer: dst_buffer.as_bytes().clone(),
range: 0..size_of_val(data.deref()) as DeviceSize,
memory: PipelineMemoryAccess {
stages: PipelineStages::ALL_TRANSFER,
@ -887,7 +889,7 @@ impl UnsafeCommandBufferBuilder {
dst_buffer.buffer().handle(),
dst_buffer.offset(),
size_of_val(data) as DeviceSize,
data.as_bytes().as_ptr() as *const _,
data as *const _ as *const _,
);
}
}
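
For illustration, a small sketch mirroring the validation above; it only restates the VUID rules in terms of `size_of_val`, which is what the reworked code now checks instead of converting the data to bytes:

use std::mem::size_of_val;
use vulkano::{buffer::BufferContents, DeviceSize};

// `vkCmdUpdateBuffer` data must be non-empty, a multiple of 4 bytes, and at most 65536 bytes.
fn update_data_is_valid(data: &(impl BufferContents + ?Sized)) -> bool {
    let data_size = size_of_val(data) as DeviceSize;
    data_size != 0 && data_size % 4 == 0 && data_size <= 65536
}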

View File

@ -71,11 +71,10 @@
//! use vulkano::command_buffer::PrimaryCommandBufferAbstract;
//! use vulkano::command_buffer::SubpassContents;
//!
//! # use vulkano::pipeline::graphics::vertex_input::Vertex;
//! # use bytemuck::{Pod, Zeroable};
//! # use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};
//!
//! # #[derive(BufferContents, Vertex)]
//! # #[repr(C)]
//! # #[derive(Clone, Copy, Debug, Default, Zeroable, Pod, Vertex)]
//! # struct PosVertex {
//! # #[format(R32G32B32_SFLOAT)]
//! # position: [f32; 3]

View File

@ -28,7 +28,7 @@ use crate::{
DeviceSize, RequiresOneOf, VulkanObject,
};
use smallvec::SmallVec;
use std::{cmp::min, sync::Arc};
use std::{cmp::min, mem::size_of_val, os::raw::c_void, sync::Arc};
impl<L, A> CommandBufferBuilder<L, A>
where
@ -627,15 +627,9 @@ where
&mut self,
pipeline_layout: Arc<PipelineLayout>,
offset: u32,
push_constants: &impl BufferContents,
push_constants: &(impl BufferContents + ?Sized),
) -> &mut Self {
let push_constants = push_constants.as_bytes();
if push_constants.is_empty() {
return self;
}
self.validate_push_constants(&pipeline_layout, offset, push_constants)
self.validate_push_constants(&pipeline_layout, offset, size_of_val(push_constants) as u32)
.unwrap();
unsafe { self.push_constants_unchecked(pipeline_layout, offset, push_constants) }
@ -645,18 +639,18 @@ where
&self,
pipeline_layout: &PipelineLayout,
offset: u32,
push_constants: &[u8],
data_size: u32,
) -> Result<(), BindPushError> {
if offset % 4 != 0 {
return Err(BindPushError::PushConstantsOffsetNotAligned);
}
if push_constants.len() % 4 != 0 {
if data_size % 4 != 0 {
return Err(BindPushError::PushConstantsSizeNotAligned);
}
let mut current_offset = offset;
let mut remaining_size = push_constants.len() as u32;
let mut remaining_size = data_size;
for range in pipeline_layout
.push_constant_ranges_disjoint()
@ -695,9 +689,8 @@ where
offset: u32,
push_constants: &(impl BufferContents + ?Sized),
) -> &mut Self {
let push_constants = push_constants.as_bytes();
let mut current_offset = offset;
let mut remaining_size = push_constants.len() as u32;
let mut remaining_size = size_of_val(push_constants) as u32;
let fns = self.device().fns();
@ -715,15 +708,14 @@ where
// push the minimum of the whole remaining data, and the part until the end of this range
let push_size = min(remaining_size, range.offset + range.size - current_offset);
let data_offset = (current_offset - offset) as usize;
let values = &push_constants[data_offset..(data_offset + push_size as usize)];
(fns.v1_0.cmd_push_constants)(
self.handle(),
pipeline_layout.handle(),
range.stages.into(),
current_offset,
values.len() as u32,
values.as_ptr() as *const _,
push_size,
(push_constants as *const _ as *const c_void).add(data_offset),
);
current_offset += push_size;
@ -743,7 +735,7 @@ where
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2711
self.builder_state
.push_constants
.insert(offset..offset + push_constants.len() as u32);
.insert(offset..offset + size_of_val(push_constants) as u32);
self.builder_state.push_constants_pipeline_layout = Some(pipeline_layout.clone());
self.resources.push(Box::new(pipeline_layout));

View File

@ -532,7 +532,7 @@ where
assert_eq!(device, dst_buffer.device());
// VUID-vkCmdFillBuffer-size-00026
assert!(dst_buffer.size() != 0);
// Guaranteed by `Subbuffer`
// VUID-vkCmdFillBuffer-dstBuffer-00029
if !dst_buffer
@ -614,15 +614,15 @@ where
where
D: BufferContents + ?Sized,
{
self.validate_update_buffer(dst_buffer.as_bytes(), data.as_bytes())?;
self.validate_update_buffer(dst_buffer.as_bytes(), size_of_val(data) as DeviceSize)?;
unsafe { Ok(self.update_buffer_unchecked(dst_buffer.into_bytes(), data.as_bytes())) }
unsafe { Ok(self.update_buffer_unchecked(dst_buffer, data)) }
}
fn validate_update_buffer(
&self,
dst_buffer: &Subbuffer<[u8]>,
data: &[u8],
data_size: DeviceSize,
) -> Result<(), ClearError> {
let device = self.device();
@ -645,7 +645,7 @@ where
assert_eq!(device, dst_buffer.device());
// VUID-vkCmdUpdateBuffer-dataSize-arraylength
assert!(size_of_val(data) != 0);
assert!(data_size != 0);
// VUID-vkCmdUpdateBuffer-dstBuffer-00034
if !dst_buffer
@ -660,10 +660,10 @@ where
// VUID-vkCmdUpdateBuffer-dstOffset-00032
// VUID-vkCmdUpdateBuffer-dataSize-00033
if size_of_val(data) as DeviceSize > dst_buffer.size() {
if data_size > dst_buffer.size() {
return Err(ClearError::RegionOutOfBufferBounds {
region_index: 0,
offset_range_end: size_of_val(data) as DeviceSize,
offset_range_end: data_size,
buffer_size: dst_buffer.size(),
});
}
@ -678,18 +678,18 @@ where
}
// VUID-vkCmdUpdateBuffer-dataSize-00037
if size_of_val(data) > 65536 {
if data_size > 65536 {
return Err(ClearError::DataTooLarge {
size: size_of_val(data) as DeviceSize,
size: data_size,
max: 65536,
});
}
// VUID-vkCmdUpdateBuffer-dataSize-00038
if size_of_val(data) % 4 != 0 {
if data_size % 4 != 0 {
return Err(ClearError::SizeNotAlignedForBuffer {
region_index: 0,
size: size_of_val(data) as DeviceSize,
size: data_size,
required_alignment: 4,
});
}
@ -714,7 +714,7 @@ where
dst_buffer.buffer().handle(),
dst_buffer.offset(),
size_of_val(data) as DeviceSize,
data.as_bytes().as_ptr() as *const _,
data as *const _ as *const _,
);
let command_index = self.next_command_index;

View File

@ -184,7 +184,7 @@ impl ImmutableImage {
command_buffer_builder: &mut AutoCommandBufferBuilder<L, A>,
) -> Result<Arc<Self>, ImmutableImageCreationError>
where
[Px]: BufferContents,
Px: BufferContents,
I: IntoIterator<Item = Px>,
I::IntoIter: ExactSizeIterator,
A: CommandBufferAllocator,

View File

@ -17,6 +17,7 @@
//! | `document_unchecked` | Include `_unchecked` functions in the generated documentation. |
//! | `cgmath` | Generate additional definitions and functions using the [`cgmath`] library. |
//! | `nalgebra` | Generate additional definitions and functions using the [`nalgebra`] library. |
//! | `serde` | Enables (de)serialization of certain types using [`serde`]. |
//!
//! # Starting off with Vulkano
//!
@ -104,6 +105,7 @@
//!
//! [`cgmath`]: https://crates.io/crates/cgmath
//! [`nalgebra`]: https://crates.io/crates/nalgebra
//! [`serde`]: https://crates.io/crates/serde
//! [`VulkanLibrary`]: crate::VulkanLibrary
//! [`Instance`]: crate::instance::Instance
//! [`Surface`]: crate::swapchain::Surface
@ -182,6 +184,7 @@ pub mod instance;
pub mod library;
mod macros;
pub mod memory;
pub mod padded;
pub mod pipeline;
pub mod query;
mod range_map;

View File

@ -888,4 +888,15 @@ macro_rules! vulkan_bitflags_enum {
}
}
pub(crate) use {vulkan_bitflags, vulkan_bitflags_enum, vulkan_enum};
// TODO: Replace with the `?` operator once its constness is stabilized.
macro_rules! try_opt {
($e:expr) => {
if let Some(val) = $e {
val
} else {
return None;
}
};
}
pub(crate) use {try_opt, vulkan_bitflags, vulkan_bitflags_enum, vulkan_enum};
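
For illustration, a sketch of crate-internal usage (the macro is `pub(crate)`, so this only compiles inside vulkano, and `DeviceSize` is assumed to be in scope); the macro stands in for the `?` operator, which cannot yet be used on `Option` in `const fn`:

// Mirrors the pattern used by `DeviceLayout::from_size_alignment` and friends.
const fn checked_double(size: DeviceSize) -> Option<DeviceSize> {
    let doubled = try_opt!(size.checked_mul(2));
    Some(doubled)
}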

View File

@ -8,10 +8,10 @@
// according to those terms.
use super::align_up;
use crate::{DeviceSize, NonZeroDeviceSize};
use crate::{macros::try_opt, DeviceSize, NonZeroDeviceSize};
use std::{
alloc::Layout,
cmp::{self, Ordering},
cmp::Ordering,
error::Error,
fmt::{Debug, Display, Formatter, Result as FmtResult},
hash::{Hash, Hasher},
@ -40,7 +40,7 @@ impl DeviceLayout {
/// zero size.
#[inline]
pub const fn from_layout(layout: Layout) -> Result<Self, TryFromLayoutError> {
let (size, alignment) = (layout.size(), layout.align());
let (size, alignment) = Self::size_alignment_from_layout(&layout);
#[cfg(any(
target_pointer_width = "64",
@ -51,10 +51,7 @@ impl DeviceLayout {
const _: () = assert!(size_of::<DeviceSize>() >= size_of::<usize>());
const _: () = assert!(DeviceLayout::MAX_SIZE >= isize::MAX as DeviceSize);
if let Some(size) = NonZeroDeviceSize::new(size as DeviceSize) {
// SAFETY: `Layout`'s alignment-invariant guarantees that it is a power of two.
let alignment = unsafe { DeviceAlignment::new_unchecked(alignment as DeviceSize) };
if let Some(size) = NonZeroDeviceSize::new(size) {
// SAFETY: Under the precondition that `usize` can't overflow `DeviceSize`, which
// we checked above, `Layout`'s overflow-invariant is the same if not stricter than
// that of `DeviceLayout`.
@ -102,14 +99,10 @@ impl DeviceLayout {
/// exceed [`DeviceLayout::MAX_SIZE`] when rounded up to the nearest multiple of `alignment`.
#[inline]
pub const fn from_size_alignment(size: DeviceSize, alignment: DeviceSize) -> Option<Self> {
if let (Some(size), Some(alignment)) = (
NonZeroDeviceSize::new(size),
DeviceAlignment::new(alignment),
) {
let size = try_opt!(NonZeroDeviceSize::new(size));
let alignment = try_opt!(DeviceAlignment::new(alignment));
DeviceLayout::new(size, alignment)
} else {
None
}
}
/// Creates a new `DeviceLayout` from the given `size` and `alignment` without doing any
@ -121,6 +114,7 @@ impl DeviceLayout {
/// - `alignment` must be a power of two, which also means it must be non-zero.
/// - `size`, when rounded up to the nearest multiple of `alignment`, must not exceed
/// [`DeviceLayout::MAX_SIZE`].
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub const unsafe fn from_size_alignment_unchecked(
size: DeviceSize,
@ -159,6 +153,7 @@ impl DeviceLayout {
///
/// - `size`, when rounded up to the nearest multiple of `alignment`, must not exceed
/// [`DeviceLayout::MAX_SIZE`].
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub const unsafe fn new_unchecked(size: NonZeroDeviceSize, alignment: DeviceAlignment) -> Self {
debug_assert!(size.get() <= Self::max_size_for_alignment(alignment));
@ -183,8 +178,8 @@ impl DeviceLayout {
/// Returns [`None`] if `self.size()` would overflow [`DeviceLayout::MAX_SIZE`] when rounded up
/// to the nearest multiple of `alignment`.
#[inline]
pub fn align_to(&self, alignment: DeviceAlignment) -> Option<Self> {
DeviceLayout::new(self.size, cmp::max(self.alignment, alignment))
pub const fn align_to(&self, alignment: DeviceAlignment) -> Option<Self> {
DeviceLayout::new(self.size, DeviceAlignment::max(self.alignment, alignment))
}
/// Returns the amount of padding that needs to be added to `self.size()` such that the result
@ -220,11 +215,12 @@ impl DeviceLayout {
/// returns [`None`] on arithmetic overflow or when the total size would exceed
/// [`DeviceLayout::MAX_SIZE`].
#[inline]
pub fn repeat(&self, n: NonZeroDeviceSize) -> Option<(Self, DeviceSize)> {
pub const fn repeat(&self, n: NonZeroDeviceSize) -> Option<(Self, DeviceSize)> {
let stride = self.padded_size();
let size = stride.checked_mul(n)?;
let size = try_opt!(stride.checked_mul(n));
let layout = try_opt!(DeviceLayout::new(size, self.alignment));
DeviceLayout::new(size, self.alignment).map(|layout| (layout, stride.get()))
Some((layout, stride.get()))
}
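A hedged sketch of what `repeat` computes, using a 12-byte element aligned to 4 bytes (for example, three `f32`s): the returned stride is the padded element size, and the array layout covers `n` strides. Import paths are assumed.

```rust
use vulkano::{memory::allocator::DeviceLayout, NonZeroDeviceSize};

let element = DeviceLayout::from_size_alignment(12, 4).unwrap();
let (array, stride) = element.repeat(NonZeroDeviceSize::new(10).unwrap()).unwrap();
assert_eq!(stride, 12); // 12 is already a multiple of 4, so no padding is added.
assert_eq!(array.size(), 120);
```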
/// Creates a new `DeviceLayout` describing the record for `self` followed by `next`, including
@ -242,13 +238,70 @@ impl DeviceLayout {
///
/// [`pad_to_alignment`]: Self::pad_to_alignment
#[inline]
pub fn extend(&self, next: Self) -> Option<(Self, DeviceSize)> {
let padding = self.padding_needed_for(next.alignment);
let offset = self.size.checked_add(padding)?;
let size = offset.checked_add(next.size())?;
let alignment = cmp::max(self.alignment, next.alignment);
pub const fn extend(&self, next: Self) -> Option<(Self, DeviceSize)> {
self.extend_inner(next.size(), next.alignment)
}
DeviceLayout::new(size, alignment).map(|layout| (layout, offset.get()))
/// Same as [`extend`], except it extends with a [`Layout`].
///
/// [`extend`]: Self::extend
#[inline]
pub const fn extend_with_layout(&self, next: Layout) -> Option<(Self, DeviceSize)> {
let (next_size, next_alignment) = Self::size_alignment_from_layout(&next);
self.extend_inner(next_size, next_alignment)
}
const fn extend_inner(
&self,
next_size: DeviceSize,
next_alignment: DeviceAlignment,
) -> Option<(Self, DeviceSize)> {
let padding = self.padding_needed_for(next_alignment);
let offset = try_opt!(self.size.checked_add(padding));
let size = try_opt!(offset.checked_add(next_size));
let alignment = DeviceAlignment::max(self.alignment, next_alignment);
let layout = try_opt!(DeviceLayout::new(size, alignment));
Some((layout, offset.get()))
}
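A hedged sketch of `extend`, mirroring how a `vec4` field that follows an `int` field gets laid out: the second layout's alignment forces 12 bytes of padding, so it starts at offset 16. Import path and accessors are assumed as above.

```rust
use vulkano::memory::allocator::DeviceLayout;

let first = DeviceLayout::from_size_alignment(4, 4).unwrap();    // e.g. an `int`
let second = DeviceLayout::from_size_alignment(16, 16).unwrap(); // e.g. a `vec4`
let (combined, offset) = first.extend(second).unwrap();

assert_eq!(offset, 16); // 4 bytes of `int` + 12 bytes of padding
assert_eq!(combined.size(), 32);
assert_eq!(combined.alignment().as_devicesize(), 16);
```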
/// Creates a new `DeviceLayout` describing the record for the `previous` [`Layout`] followed
/// by `self`. This function is otherwise the same as [`extend`].
///
/// [`extend`]: Self::extend
#[inline]
pub const fn extend_from_layout(self, previous: &Layout) -> Option<(Self, DeviceSize)> {
let (size, alignment) = Self::size_alignment_from_layout(previous);
let padding = align_up(size, self.alignment).wrapping_sub(size);
let offset = try_opt!(size.checked_add(padding));
let size = try_opt!(self.size.checked_add(offset));
let alignment = DeviceAlignment::max(alignment, self.alignment);
let layout = try_opt!(DeviceLayout::new(size, alignment));
Some((layout, offset))
}
#[inline(always)]
const fn size_alignment_from_layout(layout: &Layout) -> (DeviceSize, DeviceAlignment) {
#[cfg(any(
target_pointer_width = "64",
target_pointer_width = "32",
target_pointer_width = "16",
))]
{
const _: () = assert!(size_of::<DeviceSize>() >= size_of::<usize>());
const _: () = assert!(DeviceLayout::MAX_SIZE >= isize::MAX as DeviceSize);
// We checked that `usize` can't overflow `DeviceSize`, so this can't truncate.
let (size, alignment) = (layout.size() as DeviceSize, layout.align() as DeviceSize);
// SAFETY: `Layout`'s alignment-invariant guarantees that it is a power of two.
let alignment = unsafe { DeviceAlignment::new_unchecked(alignment) };
(size, alignment)
}
}
}
@ -374,6 +427,16 @@ impl DeviceAlignment {
pub const fn log2(self) -> u32 {
self.as_nonzero().trailing_zeros()
}
// TODO: Replace with `Ord::max` once its constness is stabilized.
#[inline(always)]
pub(crate) const fn max(self, other: Self) -> Self {
if self.as_devicesize() >= other.as_devicesize() {
self
} else {
other
}
}
}
impl Debug for DeviceAlignment {


@ -204,28 +204,6 @@ impl MemoryAlloc {
.map(|ptr| slice::from_raw_parts_mut(ptr.as_ptr().cast(), self.size as usize))
}
pub(crate) unsafe fn read(&self, range: Range<DeviceSize>) -> Option<&[u8]> {
debug_assert!(!range.is_empty() && range.end <= self.size);
self.mapped_ptr.map(|ptr| {
slice::from_raw_parts(
ptr.as_ptr().add(range.start as usize).cast(),
(range.end - range.start) as usize,
)
})
}
pub(crate) unsafe fn write(&self, range: Range<DeviceSize>) -> Option<&mut [u8]> {
debug_assert!(!range.is_empty() && range.end <= self.size);
self.mapped_ptr.map(|ptr| {
slice::from_raw_parts_mut(
ptr.as_ptr().add(range.start as usize).cast(),
(range.end - range.start) as usize,
)
})
}
pub(crate) fn atom_size(&self) -> Option<DeviceAlignment> {
self.atom_size
}

vulkano/src/padded.rs (new file, 338 lines)

@ -0,0 +1,338 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! A newtype wrapper for enforcing correct alignment for external types.
use crate::buffer::{BufferContents, BufferContentsLayout};
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{
alloc::Layout,
cmp::Ordering,
ffi::c_void,
fmt::{Debug, Display, Formatter, Result as FmtResult},
hash::{Hash, Hasher},
mem::{align_of, size_of, MaybeUninit},
ops::{Deref, DerefMut},
};
/// A newtype wrapper around `T`, with `N` bytes of trailing padding.
///
/// In Vulkan, the layout of buffer contents is not necessarily as one would expect from the type
/// signature in the shader code. For example, the *extended layout* or *std140 layout* in GLSL,
/// which is used for uniform buffers by default, requires that array elements are aligned to 16
/// bytes at minimum. That means that even if the array contains a scalar type like `u32`, each
/// element must be aligned to 16 bytes. We cannot enforce that with primitive Rust types alone.
/// In such cases, we can use `Padded` to enforce correct alignment on the Rust side.
///
/// See also [the `shader` module documentation] for more information about layout in shaders.
///
/// # Examples
///
/// ## Aligning struct members
///
/// Consider this GLSL code:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
/// int x;
/// vec3 y;
/// vec4 z;
/// };
/// ```
///
/// By default, the alignment rules require that `y` and `z` are placed at an offset that is an
/// integer multiple of 16. However, `x` is only 4 bytes, which means that there must be 12 bytes
/// of padding between `x` and `y`. Furthermore, `y` is only 12 bytes, which means that there must
/// be 4 bytes of padding between `y` and `z`.
///
/// We can model this in Rust using `Padded`:
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: Padded<i32, 12>,
/// y: Padded<[f32; 3], 4>,
/// z: [f32; 4],
/// }
///
/// let data = MyData {
/// x: Padded(42),
/// y: Padded([1.0, 2.0, 3.0]),
/// z: [10.0; 4],
/// };
/// ```
///
/// **But note that this layout is extremely suboptimal.** What you should do instead is reorder
/// your fields such that you don't need any padding:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
/// vec3 y;
/// int x;
/// vec4 z;
/// };
/// ```
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// y: [f32; 3],
/// x: i32,
/// z: [f32; 4],
/// }
/// ```
///
/// This way, the fields are aligned naturally. But reordering fields is not always an option:
/// most notably, when your structure only contains `vec3`s and `vec4`s, or `vec3`s and `vec2`s,
/// there are no scalar fields to fill the gaps with.
///
/// ## Aligning array elements
///
/// If you need an array of `vec3`s, then each array element needs 4 bytes of trailing padding.
/// The same goes for a matrix with 3 rows: each column will need 4 bytes of trailing padding
/// (assuming it's column-major).
///
/// We can model those using `Padded` too:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
/// vec3 x[10];
/// mat3 y;
/// };
/// ```
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: [Padded<[f32; 3], 4>; 10],
/// y: [Padded<[f32; 3], 4>; 3],
/// }
/// ```
///
/// Another example would be if you have an array of scalars or `vec2`s inside a uniform block:
///
/// ```glsl
/// layout(binding = 0) uniform MyData {
/// int x[10];
/// vec2 y[10];
/// };
/// ```
///
/// By default, arrays inside uniform blocks must have their elements aligned to 16 bytes at
/// minimum, which would look like this in Rust:
///
/// ```
/// # use vulkano::{buffer::BufferContents, padded::Padded};
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: [Padded<i32, 12>; 10],
/// y: [Padded<[f32; 2], 8>; 10],
/// }
/// ```
///
/// **But note again that this layout is suboptimal.** If memory usage could become an issue, you
/// can use a buffer block instead of the uniform block:
///
/// ```glsl
/// layout(binding = 0) buffer MyData {
/// int x[10];
/// vec2 y[10];
/// };
/// ```
///
/// ```
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents)]
/// #[repr(C)]
/// struct MyData {
/// x: [i32; 10],
/// y: [[f32; 2]; 10],
/// }
/// ```
///
/// You may also want to consider using [the `uniform_buffer_standard_layout` feature].
///
/// [the `shader` module documentation]: crate::shader
/// [the `uniform_buffer_standard_layout` feature]: crate::device::Features::uniform_buffer_standard_layout
#[repr(C)]
pub struct Padded<T, const N: usize> {
value: T,
_padding: [MaybeUninit<u8>; N],
}
#[allow(non_snake_case)]
#[doc(hidden)]
#[inline(always)]
pub const fn Padded<T, const N: usize>(value: T) -> Padded<T, N> {
Padded {
value,
_padding: [MaybeUninit::uninit(); N],
}
}
impl<T, const N: usize> AsRef<T> for Padded<T, N> {
fn as_ref(&self) -> &T {
&self.value
}
}
impl<T, const N: usize> AsMut<T> for Padded<T, N> {
fn as_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T, const N: usize> Clone for Padded<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
Padded(self.value.clone())
}
}
impl<T, const N: usize> Copy for Padded<T, N> where T: Copy {}
impl<T, const N: usize> Debug for Padded<T, N>
where
T: Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
self.value.fmt(f)
}
}
impl<T, const N: usize> Default for Padded<T, N>
where
T: Default,
{
fn default() -> Self {
Padded(T::default())
}
}
impl<T, const N: usize> Deref for Padded<T, N> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.value
}
}
impl<T, const N: usize> DerefMut for Padded<T, N> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.value
}
}
impl<T, const N: usize> Display for Padded<T, N>
where
T: Display,
{
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
self.value.fmt(f)
}
}
impl<T, const N: usize> From<T> for Padded<T, N> {
fn from(value: T) -> Self {
Padded(value)
}
}
impl<T, const N: usize> PartialEq for Padded<T, N>
where
T: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.value == other.value
}
}
impl<T, const N: usize> Eq for Padded<T, N> where T: Eq {}
impl<T, const N: usize> Hash for Padded<T, N>
where
T: Hash,
{
fn hash<H: Hasher>(&self, state: &mut H) {
self.value.hash(state);
}
}
impl<T, const N: usize> PartialOrd for Padded<T, N>
where
T: PartialOrd,
{
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.value.partial_cmp(&other.value)
}
}
impl<T, const N: usize> Ord for Padded<T, N>
where
T: Ord,
{
fn cmp(&self, other: &Self) -> Ordering {
self.value.cmp(&other.value)
}
}
unsafe impl<T, const N: usize> BufferContents for Padded<T, N>
where
T: BufferContents,
{
const LAYOUT: BufferContentsLayout =
if let Some(layout) = BufferContentsLayout::from_sized(Layout::new::<Self>()) {
layout
} else {
panic!("zero-sized types are not valid buffer contents");
};
unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
debug_assert!(range == size_of::<Self>());
debug_assert!(data as usize % align_of::<Self>() == 0);
data.cast()
}
}
#[cfg(feature = "serde")]
impl<T, const N: usize> Serialize for Padded<T, N>
where
T: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.value.serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, T, const N: usize> Deserialize<'de> for Padded<T, N>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
T::deserialize(deserializer).map(Padded)
}
}
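A hedged, doctest-style usage sketch of the wrapper as a whole (not part of the file): `Padded<T, N>` occupies `size_of::<T>() + N` bytes, keeps `T`'s alignment, and exposes the inner value through `Deref`, `AsRef`/`AsMut`, and `From`.

```rust
use std::mem::{align_of, size_of};
use vulkano::padded::Padded;

let x: Padded<u32, 12> = Padded(7);
assert_eq!(size_of::<Padded<u32, 12>>(), 16); // 4 bytes of `u32` + 12 bytes of padding
assert_eq!(align_of::<Padded<u32, 12>>(), 4); // the padding does not change the alignment
assert_eq!(*x, 7); // `Deref` gives access to the wrapped value

let y: Padded<u32, 12> = 7u32.into(); // `From<T>` is also provided
assert_eq!(*y, *x);
```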


@ -10,11 +10,11 @@
//! Configures how input vertices are assembled into primitives.
use crate::{
buffer::BufferContents,
macros::vulkan_enum,
pipeline::{PartialStateMode, StateMode},
DeviceSize,
};
use bytemuck::Pod;
/// The state in a graphics pipeline describing how the input assembly stage should behave.
#[derive(Clone, Copy, Debug)]
@ -199,7 +199,7 @@ impl PrimitiveTopologyClass {
}
/// Trait for types that can be used as indices by the GPU.
pub unsafe trait Index: Pod + Sync + Send {
pub unsafe trait Index: BufferContents + Sized {
/// Returns the type of data.
fn ty() -> IndexType;
}
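A hedged sketch of what the relaxed bound allows (the `input_assembly` path and `Subbuffer<[T]>::len` are assumptions): code that is generic over index buffers now only needs the `Index` bound, since `BufferContents` is implied by it.

```rust
use vulkano::{buffer::Subbuffer, pipeline::graphics::input_assembly::Index};

// A hypothetical helper; `I` no longer needs an explicit `Pod` bound.
fn index_count<I: Index>(indices: &Subbuffer<[I]>) -> u64 {
    indices.len()
}
```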


@ -40,7 +40,7 @@ impl<T: ?Sized> VertexBuffersCollection for Vec<Subbuffer<T>> {
}
}
impl<T, const N: usize> VertexBuffersCollection for [Subbuffer<T>; N] {
impl<T: ?Sized, const N: usize> VertexBuffersCollection for [Subbuffer<T>; N] {
fn into_vec(self) -> Vec<Subbuffer<[u8]>> {
self.into_iter().map(Subbuffer::into_bytes).collect()
}


@ -14,9 +14,9 @@ use crate::format::Format;
/// # Examples
///
/// ```
/// # use bytemuck::{Zeroable, Pod};
/// # use vulkano::buffer::BufferContents;
/// #[derive(BufferContents, Default)]
/// #[repr(C)]
/// #[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
/// struct Vertex {
/// position: [f32; 3],
/// color: [f32; 4],
@ -26,7 +26,7 @@ use crate::format::Format;
/// ```
#[deprecated(
since = "0.33.0",
note = "Derive `Vertex` instead and use field-level attributes to specify format"
note = "derive `Vertex` instead and use field-level attributes to specify format"
)]
#[macro_export]
macro_rules! impl_vertex {


@ -8,8 +8,7 @@
// according to those terms.
use super::VertexInputRate;
use crate::format::Format;
use bytemuck::Pod;
use crate::{buffer::BufferContents, format::Format};
use std::collections::HashMap;
pub use vulkano_macros::Vertex;
@ -21,12 +20,12 @@ pub use vulkano_macros::Vertex;
///
/// The vertex trait can be derived and the format has to be specified using the `format`
/// field-level attribute:
/// ```
/// use bytemuck::{Pod, Zeroable};
/// use vulkano::pipeline::graphics::vertex_input::Vertex;
///
/// ```
/// use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};
///
/// #[derive(BufferContents, Vertex)]
/// #[repr(C)]
/// #[derive(Clone, Copy, Debug, Default, Pod, Zeroable, Vertex)]
/// struct MyVertex {
/// // Every field needs to explicitly state the desired shader input format
/// #[format(R32G32B32_SFLOAT)]
@ -38,10 +37,12 @@ pub use vulkano_macros::Vertex;
/// proj: [f32; 16],
/// }
/// ```
pub unsafe trait Vertex: Pod + Send + Sync + 'static {
pub unsafe trait Vertex: BufferContents + Sized {
/// Returns the information about this Vertex type.
fn per_vertex() -> VertexBufferDescription;
fn per_instance() -> VertexBufferDescription;
fn per_instance_with_divisor(divisor: u32) -> VertexBufferDescription;
}
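A hedged, doctest-style sketch of the new trait methods in use, reusing the derive shown in the documentation above: each method returns a `VertexBufferDescription` for one binding, at vertex rate, instance rate, or instance rate with a divisor.

```rust
use vulkano::{buffer::BufferContents, pipeline::graphics::vertex_input::Vertex};

#[derive(BufferContents, Vertex)]
#[repr(C)]
struct MyVertex {
    #[format(R32G32B32_SFLOAT)]
    position: [f32; 3],
}

// Each of these returns a `VertexBufferDescription` describing one vertex buffer binding.
let per_vertex = MyVertex::per_vertex();
let per_instance = MyVertex::per_instance();
let with_divisor = MyVertex::per_instance_with_divisor(2);
```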
@ -100,6 +101,7 @@ pub struct VertexMemberInfo {
}
impl VertexMemberInfo {
#[inline]
pub fn num_components(&self) -> u32 {
self.format
.components()


@ -14,11 +14,11 @@
//! pool and the slot id within that query pool.
use crate::{
buffer::BufferContents,
device::{Device, DeviceOwned},
macros::vulkan_bitflags,
DeviceSize, OomError, RequirementNotMet, RequiresOneOf, VulkanError, VulkanObject,
};
use bytemuck::Pod;
use std::{
error::Error,
ffi::c_void,
@ -513,7 +513,7 @@ impl From<RequirementNotMet> for GetResultsError {
/// # Safety
/// This is implemented for `u32` and `u64`. Unless you really know what you're doing, you should
/// not implement this trait for any other type.
pub unsafe trait QueryResultElement: Pod + Sync + Send {
pub unsafe trait QueryResultElement: BufferContents + Sized {
const FLAG: ash::vk::QueryResultFlags;
}
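A hedged, doctest-style sketch of the bound in practice: both supported element types satisfy the new `BufferContents + Sized` requirement, so code that is generic over query results keeps working. The helper function is made up for illustration.

```rust
use std::mem::size_of;
use vulkano::query::QueryResultElement;

// A hypothetical helper that is generic over the two supported result types.
fn result_stride<T: QueryResultElement>() -> usize {
    size_of::<T>()
}

assert_eq!(result_stride::<u32>(), 4);
assert_eq!(result_stride::<u64>(), 8);
```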