vulkano/examples/triangle/main.rs
// Welcome to the triangle example!
//
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming and
// that you want to learn Vulkan. This means that for example it won't go into details about what a
// vertex or a shader is.
use std::{error::Error, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, CommandBufferBeginInfo, CommandBufferLevel,
CommandBufferUsage, RecordingCommandBuffer, RenderPassBeginInfo, SubpassBeginInfo,
SubpassContents,
},
device::{
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, Queue,
QueueCreateInfo, QueueFlags,
},
image::{view::ImageView, Image, ImageUsage},
instance::{Instance, InstanceCreateFlags, InstanceCreateInfo},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::{ColorBlendAttachmentState, ColorBlendState},
input_assembly::InputAssemblyState,
multisample::MultisampleState,
rasterization::RasterizationState,
vertex_input::{Vertex, VertexDefinition},
viewport::{Viewport, ViewportState},
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
DynamicState, GraphicsPipeline, PipelineLayout, PipelineShaderStageCreateInfo,
},
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
swapchain::{
acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, GpuFuture},
Validated, VulkanError, VulkanLibrary,
};
use winit::{
application::ApplicationHandler,
event::WindowEvent,
event_loop::{ActiveEventLoop, EventLoop},
window::{Window, WindowId},
};
fn main() -> Result<(), impl Error> {
let event_loop = EventLoop::new().unwrap();
let mut app = App::new(&event_loop);
event_loop.run_app(&mut app)
}
struct App {
instance: Arc<Instance>,
device: Arc<Device>,
queue: Arc<Queue>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
vertex_buffer: Subbuffer<[MyVertex]>,
rcx: Option<RenderContext>,
}
struct RenderContext {
window: Arc<Window>,
swapchain: Arc<Swapchain>,
render_pass: Arc<RenderPass>,
framebuffers: Vec<Arc<Framebuffer>>,
pipeline: Arc<GraphicsPipeline>,
viewport: Viewport,
recreate_swapchain: bool,
previous_frame_end: Option<Box<dyn GpuFuture>>,
}
impl App {
fn new(event_loop: &EventLoop<()>) -> Self {
let library = VulkanLibrary::new().unwrap();
// The first step of any Vulkan program is to create an instance.
//
// When we create an instance, we have to pass a list of extensions that we want to enable.
//
// All the window-drawing functionalities are part of non-core extensions that we need to
// enable manually. To do so, we ask `Surface` for the list of extensions required to draw
// to a window.
let required_extensions = Surface::required_extensions(event_loop).unwrap();
// Now creating the instance.
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant Vulkan implementations.
// (e.g. MoltenVK)
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
// Choose device extensions that we're going to use. In order to present images to a
// surface, we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
let device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
// We then choose which physical device to use. First, we enumerate all the available
// physical devices, then apply filters to narrow them down to those that can support our
// needs.
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| {
// Some devices may not support the extensions or features that your application
// requires, or may report properties and limits that are not sufficient for it.
// These should be filtered out here.
p.supported_extensions().contains(&device_extensions)
})
.filter_map(|p| {
// For each physical device, we try to find a suitable queue family that will
// execute our draw commands.
//
// Devices can provide multiple queues to run commands in parallel (for example a
// draw queue and a compute queue), similar to CPU threads. This is something you
// have to manage manually in Vulkan. Queues of the same type belong to the same
// queue family.
//
// Here, we look for a single queue family that is suitable for our purposes. In a
// real-world application, you may want to use a separate dedicated transfer queue
// to handle data transfers in parallel with graphics operations.
// You may also need a separate queue for compute operations, if
// your application uses those.
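//
// A minimal sketch of what such a search could look like (not used in this
// example; the `QueueFlags` values exist in vulkano, but the selection policy
// here is only illustrative):
//
// let transfer_family = p
//     .queue_family_properties()
//     .iter()
//     .position(|q| {
//         q.queue_flags.contains(QueueFlags::TRANSFER)
//             && !q.queue_flags.intersects(QueueFlags::GRAPHICS | QueueFlags::COMPUTE)
//     });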
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
// We select a queue family that supports graphics operations. When drawing
// to a window surface, as we do in this example, we also need to check
// that queues in this queue family are capable of presenting images to the
// surface.
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.presentation_support(i as u32, event_loop).unwrap()
})
// The code here searches for the first queue family that is suitable. If none
// is found, `None` is returned to `filter_map`, which
// disqualifies this physical device.
.map(|i| (p, i as u32))
})
// All the physical devices that pass the filters above are suitable for the
// application. However, not every device is equal; some are preferred over others.
// Now, we assign each physical device a score, and pick the device with the lowest
// ("best") score.
//
// In this example, we simply select the best-scoring device to use in the application.
// In a real-world setting, you may want to use the best-scoring device only as a
// "default" or "recommended" device, and let the user choose the device themself.
.min_by_key(|(p, _)| {
// We assign a lower score to device types that are likely to be faster/better.
match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
}
})
.expect("no suitable physical device found");
// Print some debug info about the device we selected.
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
// Now initializing the device. This is probably the most important object of Vulkan.
//
// An iterator of created queues is returned by the function alongside the device.
let (device, mut queues) = Device::new(
// Which physical device to connect to.
physical_device,
DeviceCreateInfo {
// A list of optional features and extensions that our program needs to work
// correctly. Some parts of the Vulkan specs are optional and must be enabled
// manually at device creation. In this example the only thing we are going to need
// is the `khr_swapchain` extension that allows us to draw to a window.
enabled_extensions: device_extensions,
// The list of queues that we are going to use. Here we only use one queue, from
// the previously chosen queue family.
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
..Default::default()
},
)
.unwrap();
// Since we can request multiple queues, the `queues` variable is in fact an iterator. We
// only use one queue in this example, so we just retrieve the first and only element of
// the iterator.
let queue = queues.next().unwrap();
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
// Before we can start creating and recording command buffers, we need a way of allocating
// them. Vulkano provides a command buffer allocator, which manages raw Vulkan command
// pools underneath and provides a safe interface for them.
let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
device.clone(),
Default::default(),
));
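// The default configuration is fine for this example. If an application records
// many command buffers per frame, the allocator could instead be tuned through a
// `StandardCommandBufferAllocatorCreateInfo`; a hedged sketch (the
// `primary_buffer_count` field name follows recent vulkano versions and may need
// adjusting to the version in use):
//
// let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
//     device.clone(),
//     StandardCommandBufferAllocatorCreateInfo {
//         primary_buffer_count: 32,
//         ..Default::default()
//     },
// ));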
// We now create a buffer that will store the shape of our triangle.
let vertices = [
MyVertex {
position: [-0.5, -0.25],
},
MyVertex {
position: [0.0, 0.5],
},
MyVertex {
position: [0.25, -0.1],
},
];
let vertex_buffer = Buffer::from_iter(
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
vertices,
)
.unwrap();
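// The memory type filter above prefers device-local memory that the host can
// still write sequentially, which suits data written once. For vertex data that
// the CPU rewrites every frame, preferring host memory can be the better
// trade-off; a hedged variation of the allocation info (the `MemoryTypeFilter`
// variants shown exist in vulkano, but which choice is faster depends on the
// hardware):
//
// AllocationCreateInfo {
//     memory_type_filter: MemoryTypeFilter::PREFER_HOST
//         | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
//     ..Default::default()
// }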
let rcx = None;
App {
instance,
device,
queue,
command_buffer_allocator,
vertex_buffer,
rcx,
}
}
}
impl ApplicationHandler for App {
fn resumed(&mut self, event_loop: &ActiveEventLoop) {
// The objective of this example is to draw a triangle on a window. To do so, we first need
// to create the window, which we do here via winit's `ActiveEventLoop::create_window`.
//
// Before we can render to a window, we must first create a `vulkano::swapchain::Surface`
// object from it, which represents the drawable surface of a window. For that we must wrap
// the `winit::window::Window` in an `Arc`.
let window = Arc::new(
event_loop
.create_window(Window::default_attributes())
.unwrap(),
);
let surface = Surface::from_window(self.instance.clone(), window.clone()).unwrap();
let window_size = window.inner_size();
// Before we can draw on the surface, we have to create what is called a swapchain.
// Creating a swapchain allocates the color buffers that will contain the image that will
// ultimately be visible on the screen. These images are returned alongside the swapchain.
let (swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only
// pass values that are allowed by the capabilities.
let surface_capabilities = self
.device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
// Choosing the internal format that the images will have.
let (image_format, _) = self
.device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0];
// Please take a look at the docs for the meaning of the parameters we didn't mention.
Swapchain::new(
self.device.clone(),
surface,
SwapchainCreateInfo {
// Some drivers report a `min_image_count` of 1, but fullscreen mode requires
// at least 2. Therefore we must ensure the count is at least 2, otherwise the
// program would crash when entering fullscreen mode on those drivers.
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
// The size of the window, only used to initially set up the swapchain.
//
// NOTE:
// On some drivers the swapchain extent is specified by
// `surface_capabilities.current_extent` and the swapchain size must use this
// extent. This extent is always the same as the window size.
//
// However, other drivers don't specify a value, i.e.
// `surface_capabilities.current_extent` is `None`. These drivers will allow
// anything, but the only sensible value is the window size.
//
// Both of these cases need the swapchain to use the window size, so we just
// use that.
image_extent: window_size.into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
// The alpha mode indicates how the alpha value of the final image will behave.
// For example, you can choose whether the window will be
// opaque or transparent.
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
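// The swapchain above keeps the default present mode (FIFO, which is always
// available). If lower latency matters more than avoiding tearing or extra
// buffering, a different mode can be requested before `Swapchain::new`. A minimal
// sketch, assuming the same two-argument query style as `surface_formats` above
// (`PresentMode` would need to be imported from `vulkano::swapchain`):
//
// let present_mode = if self
//     .device
//     .physical_device()
//     .surface_present_modes(&surface, Default::default())
//     .unwrap()
//     .into_iter()
//     .any(|m| m == PresentMode::Mailbox)
// {
//     PresentMode::Mailbox
// } else {
//     PresentMode::Fifo
// };
// // ...and then set `present_mode` in the `SwapchainCreateInfo`.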
// The next step is to create the shaders.
//
// The raw shader creation API provided by the vulkano library is unsafe for various
// reasons, so the `shader!` macro provides a way to generate a Rust module from GLSL
// source instead. In the example below, the source is provided as a string directly to
// the macro, but a path to a source file can be provided as well. Note that the user must
// specify the type of shader (e.g. "vertex", "fragment", etc.) using the `ty` option of
// the macro.
//
// The items generated by the `shader!` macro include a `load` function which loads the
// shader using an input logical device. The module also includes type definitions for
// layout structures defined in the shader source, for example uniforms and push constants.
//
// A more detailed overview of what the `shader!` macro generates can be found in the
// vulkano-shaders crate docs. You can view them at https://docs.rs/vulkano-shaders/
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: r"
#version 450
layout(location = 0) in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: r"
#version 450
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
",
}
}
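// Both shaders above embed their GLSL source as a string. The `shader!` macro also
// accepts a `path` option pointing at a source file, which is often more convenient
// for larger shaders. A minimal sketch (the file path is hypothetical):
//
// mod fs_from_file {
//     vulkano_shaders::shader! {
//         ty: "fragment",
//         path: "src/frag.glsl",
//     }
// }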
// The next step is to create a *render pass*, which is an object that describes where the
// output of the graphics pipeline will go. It describes the layout of the images where the
// colors, depth and/or stencil information will be written.
let render_pass = vulkano::single_pass_renderpass!(
self.device.clone(),
attachments: {
// `color` is a custom name we give to the first and only attachment.
color: {
// `format: <ty>` indicates the type of the format of the image. This has to be
// one of the types of the `vulkano::format` module (or alternatively one of
// your structs that implements the `FormatDesc` trait). Here we use the same
// format as the swapchain.
format: swapchain.image_format(),
// `samples: 1` means that we ask the GPU to use one sample to determine the
// value of each pixel in the color attachment. We could use a larger value
// (multisampling) for antialiasing. An example of this can be found in
// msaa-renderpass.rs.
samples: 1,
// `load_op: Clear` means that we ask the GPU to clear the content of this
// attachment at the start of the drawing.
load_op: Clear,
// `store_op: Store` means that we ask the GPU to store the output of the draw
// in the actual image. We could also ask it to discard the result.
store_op: Store,
},
},
pass: {
// We use the attachment named `color` as the one and only color attachment.
color: [color],
// No depth-stencil attachment is indicated with empty brackets.
depth_stencil: {},
},
)
.unwrap();
// The render pass we created above only describes the layout of our framebuffers. Before
// we can draw we also need to create the actual framebuffers.
//
// Since we need to draw to multiple images, we are going to create a different framebuffer
// for each image.
let framebuffers = window_size_dependent_setup(&images, &render_pass);
// Before we draw, we have to create what is called a **pipeline**. A pipeline describes
// how a GPU operation is to be performed. It is similar to an OpenGL program, but it also
// contains many settings for customization, all baked into a single object. For drawing,
// we create a **graphics** pipeline, but there are also other types of pipeline.
let pipeline = {
// First, we load the shaders that the pipeline will use: the vertex shader and the
// fragment shader.
//
// A Vulkan shader can in theory contain multiple entry points, so we have to specify
// which one.
let vs = vs::load(self.device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(self.device.clone())
.unwrap()
.entry_point("main")
.unwrap();
// Automatically generate a vertex input state from the vertex shader's input
// interface. It expects a single vertex buffer containing `MyVertex` structs.
let vertex_input_state = MyVertex::per_vertex().definition(&vs).unwrap();
// Make a list of the shader stages that the pipeline will have.
let stages = [
PipelineShaderStageCreateInfo::new(vs),
PipelineShaderStageCreateInfo::new(fs),
];
// We must now create a **pipeline layout** object, which describes the locations and
// types of descriptor sets and push constants used by the shaders in the pipeline.
//
// Multiple pipelines can share a common layout object, which is more efficient. The
// shaders in a pipeline must use a subset of the resources described in its pipeline
// layout, but the pipeline layout is allowed to contain resources that are not present
// in the shaders; they can be used by shaders in other pipelines that share the same
// layout. Thus, it is a good idea to design shaders so that many pipelines have common
// resource locations, which allows them to share pipeline layouts.
let layout = PipelineLayout::new(
self.device.clone(),
// Since we only have one pipeline in this example, and thus one pipeline layout,
// we automatically generate the creation info for it from the resources used in
// the shaders. In a real application, you would specify this information manually
// so that you can re-use one layout in multiple pipelines.
PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
.into_pipeline_layout_create_info(self.device.clone())
.unwrap(),
)
.unwrap();
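// For reference, a manually specified layout would be built from a
// `PipelineLayoutCreateInfo` instead. A hedged sketch of an empty layout (no
// descriptor sets, no push constants), which would only suit shaders that bind
// no resources, like the ones in this example:
//
// let layout = PipelineLayout::new(
//     self.device.clone(),
//     PipelineLayoutCreateInfo {
//         set_layouts: vec![],
//         push_constant_ranges: vec![],
//         ..Default::default()
//     },
// )
// .unwrap();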
// We have to indicate which subpass of which render pass this pipeline is going to be
// used in. The pipeline will only be usable from this particular subpass.
let subpass = Subpass::from(render_pass.clone(), 0).unwrap();
// Finally, create the pipeline.
GraphicsPipeline::new(
self.device.clone(),
None,
GraphicsPipelineCreateInfo {
stages: stages.into_iter().collect(),
// How vertex data is read from the vertex buffers into the vertex shader.
vertex_input_state: Some(vertex_input_state),
// How vertices are arranged into primitive shapes. The default primitive shape
// is a triangle.
input_assembly_state: Some(InputAssemblyState::default()),
// How primitives are transformed and clipped to fit the framebuffer. We use a
// resizable viewport, set to draw over the entire window.
viewport_state: Some(ViewportState::default()),
// How polygons are culled and converted into a raster of pixels. The default
// value does not perform any culling.
rasterization_state: Some(RasterizationState::default()),
// How multiple fragment shader samples are converted to a single pixel value.
// The default value does not perform any multisampling.
multisample_state: Some(MultisampleState::default()),
// How pixel values are combined with the values already present in the
// framebuffer. The default value overwrites the old value with the new one,
// without any blending.
color_blend_state: Some(ColorBlendState::with_attachment_states(
subpass.num_color_attachments(),
ColorBlendAttachmentState::default(),
)),
// Dynamic states allow us to specify parts of the pipeline settings when
// recording the command buffer, before we perform drawing. Here, we specify
// that the viewport should be dynamic.
dynamic_state: [DynamicState::Viewport].into_iter().collect(),
subpass: Some(subpass.into()),
..GraphicsPipelineCreateInfo::layout(layout)
},
)
.unwrap()
};
// Dynamic viewports allow us to recreate just the viewport when the window is resized.
// Otherwise we would have to recreate the whole pipeline.
let viewport = Viewport {
offset: [0.0, 0.0],
extent: window_size.into(),
depth_range: 0.0..=1.0,
};
// In some situations, the swapchain will become invalid by itself. This includes for
// example when the window is resized (as the images of the swapchain will no longer match
// the window's) or, on Android, when the application goes to the background and comes back
// to the foreground.
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not
// work. To continue rendering, we need to recreate the swapchain. Here, we remember
// that we need to do this for the next loop iteration.
let recreate_swapchain = false;
// In the `window_event` handler below we are going to submit commands to the GPU.
// Submitting a command produces an object that implements the `GpuFuture` trait, which
// holds the resources for as long as they are in use by the GPU.
//
// Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to
// avoid that, we store the submission of the previous frame here.
let previous_frame_end = Some(sync::now(self.device.clone()).boxed());
self.rcx = Some(RenderContext {
window,
swapchain,
render_pass,
framebuffers,
pipeline,
viewport,
recreate_swapchain,
previous_frame_end,
});
}
fn window_event(
&mut self,
event_loop: &ActiveEventLoop,
_window_id: WindowId,
event: WindowEvent,
) {
let rcx = self.rcx.as_mut().unwrap();
match event {
WindowEvent::CloseRequested => {
event_loop.exit();
}
WindowEvent::Resized(_) => {
rcx.recreate_swapchain = true;
}
WindowEvent::RedrawRequested => {
let window_size = rcx.window.inner_size();
// Do not draw the frame when the screen size is zero. On Windows, this can occur
// when minimizing the application.
if window_size.width == 0 || window_size.height == 0 {
return;
}
// It is important to call this function from time to time, otherwise resources
// will keep accumulating and you will eventually reach an out-of-memory error.
// Calling this function polls various fences in order to determine what the GPU
// has already processed, and frees the resources that are no longer needed.
rcx.previous_frame_end.as_mut().unwrap().cleanup_finished();
// Whenever the window resizes we need to recreate everything dependent on the
// window size. In this example that includes the swapchain, the framebuffers and
// the dynamic state viewport.
if rcx.recreate_swapchain {
// Use the new dimensions of the window.
let (new_swapchain, new_images) = rcx
.swapchain
.recreate(SwapchainCreateInfo {
image_extent: window_size.into(),
..rcx.swapchain.create_info()
})
.expect("failed to recreate swapchain");
rcx.swapchain = new_swapchain;
// Because the framebuffers contain a reference to the old swapchain, we need to
// recreate framebuffers as well.
rcx.framebuffers = window_size_dependent_setup(&new_images, &rcx.render_pass);
rcx.viewport.extent = window_size.into();
rcx.recreate_swapchain = false;
}
// Before we can draw on the output, we have to *acquire* an image from the
// swapchain. If no image is available (which happens if you submit draw commands
// too quickly), then the function will block. This operation returns the index of
// the image that we are allowed to draw upon.
//
// The second parameter is an optional timeout after which the function call will
// return an error instead of blocking indefinitely.
let (image_index, suboptimal, acquire_future) = match acquire_next_image(
rcx.swapchain.clone(),
None,
)
.map_err(Validated::unwrap)
{
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
rcx.recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
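// The `None` passed above means "wait for an image indefinitely". A bounded wait
// is possible by passing a `std::time::Duration` instead; a hedged sketch (error
// handling elided):
//
// let (image_index, suboptimal, acquire_future) =
//     acquire_next_image(rcx.swapchain.clone(), Some(Duration::from_millis(100)))
//         .map_err(Validated::unwrap)
//         .unwrap();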
// `acquire_next_image` can be successful, but suboptimal. This means that the
// swapchain image will still work, but it may not display correctly. With some
// drivers this can happen when the window is resized, without the swapchain
// becoming out of date.
if suboptimal {
rcx.recreate_swapchain = true;
}
// In order to draw, we have to record a *command buffer*. The command buffer
// object holds the list of commands that are going to be executed.
//
// Recording a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to
// be optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The
// command buffer will only be executable on that given queue family.
let mut builder = RecordingCommandBuffer::new(
self.command_buffer_allocator.clone(),
self.queue.queue_family_index(),
CommandBufferLevel::Primary,
CommandBufferBeginInfo {
usage: CommandBufferUsage::OneTimeSubmit,
..Default::default()
},
)
.unwrap();
builder
// Before we can draw, we have to *enter a render pass*.
.begin_render_pass(
RenderPassBeginInfo {
// A list of values to clear the attachments with. This list contains
// one item for each attachment in the render pass. In this case, there
// is only one attachment, and we clear it with a blue color.
//
// Only attachments that have `AttachmentLoadOp::Clear` are provided
// with clear values, any others should use `None` as the clear value.
clear_values: vec![Some([0.0, 0.0, 1.0, 1.0].into())],
..RenderPassBeginInfo::framebuffer(
rcx.framebuffers[image_index as usize].clone(),
)
},
SubpassBeginInfo {
// The contents of the first (and only) subpass. This can be either
// `Inline` or `SecondaryCommandBuffers`. The latter is a bit more
// advanced and is not covered here.
contents: SubpassContents::Inline,
..Default::default()
},
)
.unwrap()
// We are now inside the first subpass of the render pass.
//
// TODO: Document state setting and how it affects subsequent draw commands.
.set_viewport(0, [rcx.viewport.clone()].into_iter().collect())
.unwrap()
.bind_pipeline_graphics(rcx.pipeline.clone())
.unwrap()
.bind_vertex_buffers(0, self.vertex_buffer.clone())
.unwrap();
unsafe {
builder
// We add a draw command.
.draw(self.vertex_buffer.len() as u32, 1, 0, 0)
.unwrap();
}
builder
// We leave the render pass. Note that if we had multiple subpasses we could
// have called `next_subpass` to jump to the next subpass.
.end_render_pass(Default::default())
.unwrap();
// Finish recording the command buffer by calling `end`.
let command_buffer = builder.end().unwrap();
let future = rcx
.previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)
.unwrap()
// The color output is now expected to contain our triangle. But in order to
// show it on the screen, we have to *present* the image by calling
// `then_swapchain_present`.
//
// This function does not actually present the image immediately. Instead it
// submits a present command at the end of the queue. This means that it will
// only be presented once the GPU has finished executing the command buffer
// that draws the triangle.
.then_swapchain_present(
self.queue.clone(),
SwapchainPresentInfo::swapchain_image_index(
rcx.swapchain.clone(),
image_index,
),
)
.then_signal_fence_and_flush();
match future.map_err(Validated::unwrap) {
Ok(future) => {
rcx.previous_frame_end = Some(future.boxed());
}
Err(VulkanError::OutOfDate) => {
rcx.recreate_swapchain = true;
rcx.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
}
Err(e) => {
panic!("failed to flush future: {e}");
// previous_frame_end = Some(sync::now(device.clone()).boxed());
}
}
}
_ => {}
}
}
fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {
let rcx = self.rcx.as_mut().unwrap();
rcx.window.request_redraw();
}
}
// We use `#[repr(C)]` here to force rustc to use a defined layout for our data, as the default
// representation has *no guarantees*.
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct MyVertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
}
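// A vertex type with more attributes follows the same pattern: each field gets a
// `#[format(...)]` attribute describing its layout. A sketch of a hypothetical
// colored vertex (not used by this example's shaders):
//
// #[derive(BufferContents, Vertex)]
// #[repr(C)]
// struct ColoredVertex {
//     #[format(R32G32_SFLOAT)]
//     position: [f32; 2],
//     #[format(R32G32B32_SFLOAT)]
//     color: [f32; 3],
// }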
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
images: &[Arc<Image>],
render_pass: &Arc<RenderPass>,
) -> Vec<Arc<Framebuffer>> {
images
.iter()
.map(|image| {
let view = ImageView::new_default(image.clone()).unwrap();
Framebuffer::new(
render_pass.clone(),
FramebufferCreateInfo {
attachments: vec![view],
..Default::default()
},
)
.unwrap()
})
.collect::<Vec<_>>()
}