// Welcome to the triangle example!
//
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming and
// that you want to learn Vulkan. This means that for example it won't go into details about what a
// vertex or a shader is.

use std::{error::Error, sync::Arc};
use vulkano::{
    buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage},
    command_buffer::{
        allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
        RenderPassBeginInfo, SubpassBeginInfo, SubpassContents,
    },
    device::{
        physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
        QueueFlags,
    },
    image::{view::ImageView, Image, ImageUsage},
    instance::{Instance, InstanceCreateFlags, InstanceCreateInfo},
    memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
    pipeline::{
        graphics::{
            color_blend::{ColorBlendAttachmentState, ColorBlendState},
            input_assembly::InputAssemblyState,
            multisample::MultisampleState,
            rasterization::RasterizationState,
            vertex_input::{Vertex, VertexDefinition},
            viewport::{Viewport, ViewportState},
            GraphicsPipelineCreateInfo,
        },
        layout::PipelineDescriptorSetLayoutCreateInfo,
        DynamicState, GraphicsPipeline, PipelineLayout, PipelineShaderStageCreateInfo,
    },
    render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
    swapchain::{
        acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
    },
    sync::{self, GpuFuture},
    Validated, VulkanError, VulkanLibrary,
};
use winit::{
    event::{Event, WindowEvent},
    event_loop::{ControlFlow, EventLoop},
    window::WindowBuilder,
};

fn main() -> Result<(), impl Error> {
    let event_loop = EventLoop::new().unwrap();

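    // Load the system's Vulkan library (the loader). This fails if no Vulkan implementation is
    // installed, in which case the example cannot run.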
    let library = VulkanLibrary::new().unwrap();

    // The first step of any Vulkan program is to create an instance.
    //
    // When we create an instance, we have to pass a list of extensions that we want to enable.
    //
    // All the window-drawing functionalities are part of non-core extensions that we need to
    // enable manually. To do so, we ask `Surface` for the list of extensions required to draw to
    // a window.
    let required_extensions = Surface::required_extensions(&event_loop).unwrap();

    // Now creating the instance.
    let instance = Instance::new(
        library,
        InstanceCreateInfo {
            // Enable enumerating devices that use non-conformant Vulkan implementations.
            // (e.g. MoltenVK)
            flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
            enabled_extensions: required_extensions,
            ..Default::default()
        },
    )
    .unwrap();

    // The objective of this example is to draw a triangle on a window. To do so, we first need to
    // create the window. We use the `WindowBuilder` from the `winit` crate to do that here.
    //
    // Before we can render to a window, we must first create a `vulkano::swapchain::Surface`
    // object from it, which represents the drawable surface of a window. For that we must wrap the
    // `winit::window::Window` in an `Arc`.
    let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
    let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();

    // Choose device extensions that we're going to use. In order to present images to a surface,
    // we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
    let device_extensions = DeviceExtensions {
        khr_swapchain: true,
        ..DeviceExtensions::empty()
    };

    // We then choose which physical device to use. First, we enumerate all the available physical
    // devices, then apply filters to narrow them down to those that can support our needs.
    let (physical_device, queue_family_index) = instance
        .enumerate_physical_devices()
        .unwrap()
        .filter(|p| {
            // Some devices may not support the extensions or features that your application
            // needs, or may report properties and limits that are not sufficient for your
            // application. These should be filtered out here.
            p.supported_extensions().contains(&device_extensions)
        })
        .filter_map(|p| {
            // For each physical device, we try to find a suitable queue family that will execute
            // our draw commands.
            //
            // Devices can provide multiple queues to run commands in parallel (for example a draw
            // queue and a compute queue), similar to CPU threads. This is something you have to
            // manage manually in Vulkan. Queues of the same type belong to the same queue family.
            //
            // Here, we look for a single queue family that is suitable for our purposes. In a
            // real-world application, you may want to use a separate dedicated transfer queue to
            // handle data transfers in parallel with graphics operations. You may also need a
            // separate queue for compute operations, if your application uses those.
            p.queue_family_properties()
                .iter()
                .enumerate()
                .position(|(i, q)| {
                    // We select a queue family that supports graphics operations. When drawing to
                    // a window surface, as we do in this example, we also need to check that
                    // queues in this queue family are capable of presenting images to the surface.
                    q.queue_flags.intersects(QueueFlags::GRAPHICS)
                        && p.surface_support(i as u32, &surface).unwrap_or(false)
                })
                // The code here searches for the first queue family that is suitable. If none is
                // found, `None` is returned to `filter_map`, which disqualifies this physical
                // device.
                .map(|i| (p, i as u32))
        })
        // All the physical devices that pass the filters above are suitable for the application.
        // However, not every device is equal; some are preferred over others. Now, we assign each
        // physical device a score, and pick the device with the lowest ("best") score.
        //
        // In this example, we simply select the best-scoring device to use in the application.
        // In a real-world setting, you may want to use the best-scoring device only as a "default"
        // or "recommended" device, and let the user choose the device themselves.
        .min_by_key(|(p, _)| {
            // We assign a lower score to device types that are likely to be faster/better.
            match p.properties().device_type {
                PhysicalDeviceType::DiscreteGpu => 0,
                PhysicalDeviceType::IntegratedGpu => 1,
                PhysicalDeviceType::VirtualGpu => 2,
                PhysicalDeviceType::Cpu => 3,
                PhysicalDeviceType::Other => 4,
                _ => 5,
            }
        })
        .expect("no suitable physical device found");

    // Print some debug info.
    println!(
        "Using device: {} (type: {:?})",
        physical_device.properties().device_name,
        physical_device.properties().device_type,
    );

    // Now initializing the device. This is probably the most important object of Vulkan.
    //
    // An iterator of created queues is returned by the function alongside the device.
    let (device, mut queues) = Device::new(
        // Which physical device to connect to.
        physical_device,
        DeviceCreateInfo {
            // A list of optional features and extensions that our program needs to work correctly.
            // Some parts of the Vulkan specs are optional and must be enabled manually at device
            // creation. In this example the only thing we are going to need is the `khr_swapchain`
            // extension that allows us to draw to a window.
            enabled_extensions: device_extensions,

            // The list of queues that we are going to use. Here we only use one queue, from the
            // previously chosen queue family.
            queue_create_infos: vec![QueueCreateInfo {
                queue_family_index,
                ..Default::default()
            }],

            ..Default::default()
        },
    )
    .unwrap();

    // Since we can request multiple queues, the `queues` variable is in fact an iterator. We only
    // use one queue in this example, so we just retrieve the first and only element of the
    // iterator.
    let queue = queues.next().unwrap();

    // Before we can draw on the surface, we have to create what is called a swapchain. Creating a
    // swapchain allocates the color buffers that will contain the image that will ultimately be
    // visible on the screen. These images are returned alongside the swapchain.
    let (mut swapchain, images) = {
        // Querying the capabilities of the surface. When we create the swapchain we can only pass
        // values that are allowed by the capabilities.
        let surface_capabilities = device
            .physical_device()
            .surface_capabilities(&surface, Default::default())
            .unwrap();

        // Choosing the internal format that the images will have.
        let image_format = device
            .physical_device()
            .surface_formats(&surface, Default::default())
            .unwrap()[0]
            .0;

        // Please take a look at the docs for the meaning of the parameters we didn't mention.
        Swapchain::new(
            device.clone(),
            surface,
            SwapchainCreateInfo {
                // Some drivers report a `min_image_count` of 1, but fullscreen mode requires at
                // least 2. Therefore we must ensure the count is at least 2, otherwise the program
                // would crash when entering fullscreen mode on those drivers.
                min_image_count: surface_capabilities.min_image_count.max(2),

                image_format,

                // The size of the window, only used to initially set up the swapchain.
                //
                // NOTE:
                // On some drivers the swapchain extent is specified by
                // `surface_capabilities.current_extent` and the swapchain size must use this
                // extent. This extent is always the same as the window size.
                //
                // However, other drivers don't specify a value, i.e.
                // `surface_capabilities.current_extent` is `None`. These drivers will allow
                // anything, but the only sensible value is the window size.
                //
                // Both of these cases need the swapchain to use the window size, so we just
                // use that.
                image_extent: window.inner_size().into(),

                image_usage: ImageUsage::COLOR_ATTACHMENT,

                // The alpha mode indicates how the alpha value of the final image will behave. For
                // example, you can choose whether the window will be opaque or transparent.
                composite_alpha: surface_capabilities
                    .supported_composite_alpha
                    .into_iter()
                    .next()
                    .unwrap(),

                ..Default::default()
            },
        )
        .unwrap()
    };

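    // A general-purpose memory allocator. We use it below to allocate the vertex buffer.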
    let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));

    // We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here
    // to force rustc to use a defined layout for our data, as the default representation has *no
    // guarantees*.
    #[derive(BufferContents, Vertex)]
    #[repr(C)]
    struct Vertex {
        #[format(R32G32_SFLOAT)]
        position: [f32; 2],
    }

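    // The triangle's corners, in Vulkan's normalized device coordinates: x and y both range from
    // -1.0 to 1.0 across the framebuffer, with y pointing downwards.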
    let vertices = [
        Vertex {
            position: [-0.5, -0.25],
        },
        Vertex {
            position: [0.0, 0.5],
        },
        Vertex {
            position: [0.25, -0.1],
        },
    ];
    let vertex_buffer = Buffer::from_iter(
        memory_allocator,
        BufferCreateInfo {
            usage: BufferUsage::VERTEX_BUFFER,
            ..Default::default()
        },
        AllocationCreateInfo {
            memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
            ..Default::default()
        },
        vertices,
    )
    .unwrap();

    // The next step is to create the shaders.
    //
    // The raw shader creation API provided by the vulkano library is unsafe for various reasons,
    // so the `shader!` macro provides a way to generate a Rust module from GLSL source - in the
    // example below, the source is provided as a string input directly to the macro, but a path
    // to a source file can be provided as well. Note that the user must specify the type of shader
    // (e.g. "vertex", "fragment", etc.) using the `ty` option of the macro.
    //
    // The items generated by the `shader!` macro include a `load` function which loads the shader
    // using an input logical device. The module also includes type definitions for layout
    // structures defined in the shader source, for example uniforms and push constants.
    //
    // A more detailed overview of what the `shader!` macro generates can be found in the
    // vulkano-shaders crate docs. You can view them at https://docs.rs/vulkano-shaders/
    mod vs {
        vulkano_shaders::shader! {
            ty: "vertex",
            src: r"
                #version 450

                layout(location = 0) in vec2 position;

                void main() {
                    gl_Position = vec4(position, 0.0, 1.0);
                }
            ",
        }
    }

    mod fs {
        vulkano_shaders::shader! {
            ty: "fragment",
            src: r"
                #version 450

                layout(location = 0) out vec4 f_color;

                void main() {
                    f_color = vec4(1.0, 0.0, 0.0, 1.0);
                }
            ",
        }
    }

    // At this point, OpenGL initialization would be finished. However, in Vulkan it is not.
    // OpenGL implicitly does a lot of computation whenever you draw. In Vulkan, you have to do
    // all this manually.

    // The next step is to create a *render pass*, which is an object that describes where the
    // output of the graphics pipeline will go. It describes the layout of the images where the
    // colors, depth and/or stencil information will be written.
    let render_pass = vulkano::single_pass_renderpass!(
        device.clone(),
        attachments: {
            // `color` is a custom name we give to the first and only attachment.
            color: {
                // `format: <ty>` indicates the type of the format of the image. This has to be one
                // of the types of the `vulkano::format` module (or alternatively one of your
                // structs that implements the `FormatDesc` trait). Here we use the same format as
                // the swapchain.
                format: swapchain.image_format(),
                // `samples: 1` means that we ask the GPU to use one sample to determine the value
                // of each pixel in the color attachment. We could use a larger value
                // (multisampling) for antialiasing. An example of this can be found in
                // msaa-renderpass.rs.
                samples: 1,
                // `load_op: Clear` means that we ask the GPU to clear the content of this
                // attachment at the start of the drawing.
                load_op: Clear,
                // `store_op: Store` means that we ask the GPU to store the output of the draw in
                // the actual image. We could also ask it to discard the result.
                store_op: Store,
            },
        },
        pass: {
            // We use the attachment named `color` as the one and only color attachment.
            color: [color],
            // No depth-stencil attachment is indicated with empty brackets.
            depth_stencil: {},
        },
    )
    .unwrap();

    // Before we draw, we have to create what is called a **pipeline**. A pipeline describes how
    // a GPU operation is to be performed. It is similar to an OpenGL program, but it also contains
    // many settings for customization, all baked into a single object. For drawing, we create
    // a **graphics** pipeline, but there are also other types of pipeline.
    let pipeline = {
        // First, we load the shaders that the pipeline will use:
        // the vertex shader and the fragment shader.
        //
        // A Vulkan shader can in theory contain multiple entry points, so we have to specify which
        // one.
        let vs = vs::load(device.clone())
            .unwrap()
            .entry_point("main")
            .unwrap();
        let fs = fs::load(device.clone())
            .unwrap()
            .entry_point("main")
            .unwrap();

        // Automatically generate a vertex input state from the vertex shader's input interface,
        // that takes a single vertex buffer containing `Vertex` structs.
        let vertex_input_state = Vertex::per_vertex()
            .definition(&vs.info().input_interface)
            .unwrap();

        // Make a list of the shader stages that the pipeline will have.
        let stages = [
            PipelineShaderStageCreateInfo::new(vs),
            PipelineShaderStageCreateInfo::new(fs),
        ];

        // We must now create a **pipeline layout** object, which describes the locations and
        // types of descriptor sets and push constants used by the shaders in the pipeline.
        //
        // Multiple pipelines can share a common layout object, which is more efficient.
        // The shaders in a pipeline must use a subset of the resources described in its pipeline
        // layout, but the pipeline layout is allowed to contain resources that are not present
        // in the shaders; they can be used by shaders in other pipelines that share the same
        // layout. Thus, it is a good idea to design shaders so that many pipelines have common
        // resource locations, which allows them to share pipeline layouts.
        let layout = PipelineLayout::new(
            device.clone(),
            // Since we only have one pipeline in this example, and thus one pipeline layout,
            // we automatically generate the creation info for it from the resources used in the
            // shaders. In a real application, you would specify this information manually so
            // that you can re-use one layout in multiple pipelines.
            PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
                .into_pipeline_layout_create_info(device.clone())
                .unwrap(),
        )
        .unwrap();

        // We have to indicate which subpass of which render pass this pipeline is going to be used
        // in. The pipeline will only be usable from this particular subpass.
        let subpass = Subpass::from(render_pass.clone(), 0).unwrap();

        // Finally, create the pipeline.
        GraphicsPipeline::new(
            device.clone(),
            None,
            GraphicsPipelineCreateInfo {
                stages: stages.into_iter().collect(),
                // How vertex data is read from the vertex buffers into the vertex shader.
                vertex_input_state: Some(vertex_input_state),
                // How vertices are arranged into primitive shapes.
                // The default primitive shape is a triangle.
                input_assembly_state: Some(InputAssemblyState::default()),
                // How primitives are transformed and clipped to fit the framebuffer.
                // We use a resizable viewport, set to draw over the entire window.
                viewport_state: Some(ViewportState::default()),
                // How polygons are culled and converted into a raster of pixels.
                // The default value does not perform any culling.
                rasterization_state: Some(RasterizationState::default()),
                // How multiple fragment shader samples are converted to a single pixel value.
                // The default value does not perform any multisampling.
                multisample_state: Some(MultisampleState::default()),
                // How pixel values are combined with the values already present in the
                // framebuffer. The default value overwrites the old value with the new one,
                // without any blending.
                color_blend_state: Some(ColorBlendState::with_attachment_states(
                    subpass.num_color_attachments(),
                    ColorBlendAttachmentState::default(),
                )),
                // Dynamic states allow us to specify parts of the pipeline settings when
                // recording the command buffer, before we perform drawing.
                // Here, we specify that the viewport should be dynamic.
                dynamic_state: [DynamicState::Viewport].into_iter().collect(),
                subpass: Some(subpass.into()),
                ..GraphicsPipelineCreateInfo::layout(layout)
            },
        )
        .unwrap()
    };

    // Dynamic viewports allow us to recreate just the viewport when the window is resized.
    // Otherwise we would have to recreate the whole pipeline.
    let mut viewport = Viewport {
        offset: [0.0, 0.0],
        extent: [0.0, 0.0],
        depth_range: 0.0..=1.0,
    };

    // The render pass we created above only describes the layout of our framebuffers. Before we
    // can draw we also need to create the actual framebuffers.
    //
    // Since we need to draw to multiple images, we are going to create a different framebuffer for
    // each image.
    let mut framebuffers =
        window_size_dependent_setup(&images, render_pass.clone(), &mut viewport);

    // Before we can start creating and recording command buffers, we need a way of allocating
    // them. Vulkano provides a command buffer allocator, which manages raw Vulkan command pools
    // underneath and provides a safe interface for them.
    let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
        device.clone(),
        Default::default(),
    ));

    // Initialization is finally finished!

    // In some situations, the swapchain will become invalid by itself. This includes for example
    // when the window is resized (as the images of the swapchain will no longer match the
    // window's) or, on Android, when the application goes to the background and comes back to
    // the foreground.
    //
    // In this situation, acquiring a swapchain image or presenting it will return an error.
    // Rendering to an image of that swapchain will not produce any error, but may or may not work.
    // To continue rendering, we need to recreate the swapchain by creating a new one. Here, we
    // remember that we need to do this for the next loop iteration.
    let mut recreate_swapchain = false;

    // In the loop below we are going to submit commands to the GPU. Submitting a command produces
    // an object that implements the `GpuFuture` trait, which holds the resources for as long as
    // they are in use by the GPU.
    //
    // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
    // that, we store the submission of the previous frame here.
    let mut previous_frame_end = Some(sync::now(device.clone()).boxed());

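    // The event loop: winit delivers window events to the closure below. On every redraw request
    // we acquire a swapchain image, record a command buffer that draws the triangle into it,
    // submit it, and present the result.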
    event_loop.run(move |event, elwt| {
        elwt.set_control_flow(ControlFlow::Poll);

        match event {
            Event::WindowEvent {
                event: WindowEvent::CloseRequested,
                ..
            } => {
                elwt.exit();
            }
            Event::WindowEvent {
                event: WindowEvent::Resized(_),
                ..
            } => {
                recreate_swapchain = true;
            }
            Event::WindowEvent {
                event: WindowEvent::RedrawRequested,
                ..
            } => {
                // Do not draw the frame when the screen size is zero. On Windows, this can
                // occur when minimizing the application.
                let image_extent: [u32; 2] = window.inner_size().into();

                if image_extent.contains(&0) {
                    return;
                }

                // It is important to call this function from time to time, otherwise resources
                // will keep accumulating and you will eventually reach an out of memory error.
                // Calling this function polls various fences in order to determine what the GPU
                // has already processed, and frees the resources that are no longer needed.
                previous_frame_end.as_mut().unwrap().cleanup_finished();

                // Whenever the window resizes we need to recreate everything dependent on the
                // window size. In this example that includes the swapchain, the framebuffers and
                // the dynamic state viewport.
                if recreate_swapchain {
                    // Use the new dimensions of the window.

                    let (new_swapchain, new_images) = swapchain
                        .recreate(SwapchainCreateInfo {
                            image_extent,
                            ..swapchain.create_info()
                        })
                        .expect("failed to recreate swapchain");

                    swapchain = new_swapchain;

                    // Because the framebuffers contain a reference to the old swapchain, we need
                    // to recreate them as well.
                    framebuffers = window_size_dependent_setup(
                        &new_images,
                        render_pass.clone(),
                        &mut viewport,
                    );

                    recreate_swapchain = false;
                }

                // Before we can draw on the output, we have to *acquire* an image from the
                // swapchain. If no image is available (which happens if you submit draw commands
                // too quickly), then the function will block. This operation returns the index of
                // the image that we are allowed to draw upon.
                //
                // This function can block if no image is available. The parameter is an optional
                // timeout after which the function call will return an error.
                let (image_index, suboptimal, acquire_future) =
                    match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
                        Ok(r) => r,
                        Err(VulkanError::OutOfDate) => {
                            recreate_swapchain = true;
                            return;
                        }
                        Err(e) => panic!("failed to acquire next image: {e}"),
                    };

                // `acquire_next_image` can be successful, but suboptimal. This means that the
                // swapchain image will still work, but it may not display correctly. With some
                // drivers this can happen when the window resizes, but it may not cause the
                // swapchain to become out of date.
                if suboptimal {
                    recreate_swapchain = true;
                }

                // In order to draw, we have to build a *command buffer*. The command buffer object
                // holds the list of commands that are going to be executed.
                //
                // Building a command buffer is an expensive operation (usually a few hundred
                // microseconds), but it is known to be a hot path in the driver and is expected to
                // be optimized.
                //
                // Note that we have to pass a queue family when we create the command buffer. The
                // command buffer will only be executable on that given queue family.
                let mut builder = AutoCommandBufferBuilder::primary(
                    command_buffer_allocator.clone(),
                    queue.queue_family_index(),
                    CommandBufferUsage::OneTimeSubmit,
                )
                .unwrap();

                builder
                    // Before we can draw, we have to *enter a render pass*.
                    .begin_render_pass(
                        RenderPassBeginInfo {
                            // A list of values to clear the attachments with. This list contains
                            // one item for each attachment in the render pass. In this case, there
                            // is only one attachment, and we clear it with a blue color.
                            //
                            // Only attachments that have `AttachmentLoadOp::Clear` are provided
                            // with clear values, any others should use `None` as the clear value.
                            clear_values: vec![Some([0.0, 0.0, 1.0, 1.0].into())],

                            ..RenderPassBeginInfo::framebuffer(
                                framebuffers[image_index as usize].clone(),
                            )
                        },
                        SubpassBeginInfo {
                            // The contents of the first (and only) subpass.
                            // This can be either `Inline` or `SecondaryCommandBuffers`.
                            // The latter is a bit more advanced and is not covered here.
                            contents: SubpassContents::Inline,
                            ..Default::default()
                        },
                    )
                    .unwrap()
                    // We are now inside the first subpass of the render pass.
                    //
                    // TODO: Document state setting and how it affects subsequent draw commands.
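                    // Note: because we declared `DynamicState::Viewport` when creating the
                    // pipeline, the viewport must be set on the command buffer before drawing;
                    // the value set here applies to the draw commands recorded after it.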
                    .set_viewport(0, [viewport.clone()].into_iter().collect())
                    .unwrap()
                    .bind_pipeline_graphics(pipeline.clone())
                    .unwrap()
                    .bind_vertex_buffers(0, vertex_buffer.clone())
                    .unwrap()
                    // We add a draw command.
                    .draw(vertex_buffer.len() as u32, 1, 0, 0)
                    .unwrap()
                    // We leave the render pass. Note that if we had multiple subpasses we could
                    // have called `next_subpass` to jump to the next subpass.
                    .end_render_pass(Default::default())
                    .unwrap();

                // Finish building the command buffer by calling `build`.
                let command_buffer = builder.build().unwrap();

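                // Chain the GPU futures for this frame: wait for the previous frame's submission
                // and for the acquired image, execute our command buffer on the queue, then
                // present the image and flush everything to the GPU.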
                let future = previous_frame_end
                    .take()
                    .unwrap()
                    .join(acquire_future)
                    .then_execute(queue.clone(), command_buffer)
                    .unwrap()
                    // The color output is now expected to contain our triangle. But in order to
                    // show it on the screen, we have to *present* the image by calling
                    // `then_swapchain_present`.
                    //
                    // This function does not actually present the image immediately. Instead it
                    // submits a present command at the end of the queue. This means that it will
                    // only be presented once the GPU has finished executing the command buffer
                    // that draws the triangle.
                    .then_swapchain_present(
                        queue.clone(),
                        SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
                    )
                    .then_signal_fence_and_flush();

                match future.map_err(Validated::unwrap) {
                    Ok(future) => {
                        previous_frame_end = Some(future.boxed());
                    }
                    Err(VulkanError::OutOfDate) => {
                        recreate_swapchain = true;
                        previous_frame_end = Some(sync::now(device.clone()).boxed());
                    }
                    Err(e) => {
                        panic!("failed to flush future: {e}");
                        // previous_frame_end = Some(sync::now(device.clone()).boxed());
                    }
                }
            }
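            // `AboutToWait` is emitted once all pending events have been processed. Requesting a
            // redraw here keeps the window rendering continuously.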
            Event::AboutToWait => window.request_redraw(),
            _ => (),
        }
    })
}

/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
    images: &[Arc<Image>],
    render_pass: Arc<RenderPass>,
    viewport: &mut Viewport,
) -> Vec<Arc<Framebuffer>> {
    let extent = images[0].extent();
    viewport.extent = [extent[0] as f32, extent[1] as f32];

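    // Wrap each swapchain image in an image view and use that view as the sole attachment of a
    // new framebuffer for our render pass.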
    images
        .iter()
        .map(|image| {
            let view = ImageView::new_default(image.clone()).unwrap();
            Framebuffer::new(
                render_pass.clone(),
                FramebufferCreateInfo {
                    attachments: vec![view],
                    ..Default::default()
                },
            )
            .unwrap()
        })
        .collect::<Vec<_>>()
}