Remove unrelated comments and apply rustfmt

atynagano 2023-09-13 23:33:15 +09:00
parent 3de4f20154
commit 4e14bd2cdf


@@ -7,23 +7,19 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
// Welcome to the triangle example!
//
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming and
// that you want to learn Vulkan. This means that for example it won't go into details about what a
// vertex or a shader is.
//
// This version of the triangle example is written using dynamic rendering instead of render pass
// and framebuffer objects. If your device does not support Vulkan 1.3 or the
// `khr_dynamic_rendering` extension, or if you want to see how to support older versions, see the
// original triangle example.
use std::sync::Arc;
use vulkano::{
    acceleration_structure::{
        AccelerationStructure, AccelerationStructureBuildGeometryInfo,
        AccelerationStructureBuildRangeInfo, AccelerationStructureBuildSizesInfo,
        AccelerationStructureBuildType, AccelerationStructureCreateInfo,
        AccelerationStructureGeometries, AccelerationStructureGeometryInstancesData,
        AccelerationStructureGeometryInstancesDataType, AccelerationStructureGeometryTrianglesData,
        AccelerationStructureInstance, AccelerationStructureType, BuildAccelerationStructureFlags,
        BuildAccelerationStructureMode, GeometryFlags, GeometryInstanceFlags,
    },
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage,
@@ -33,12 +29,14 @@ use vulkano::{
allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet,
},
device::{
        physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, Features, Queue,
        QueueCreateInfo, QueueFlags,
},
image::{view::ImageView, Image, ImageUsage},
instance::{Instance, InstanceCreateFlags, InstanceCreateInfo},
    memory::allocator::{
        AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter, StandardMemoryAllocator,
    },
pipeline::{
graphics::{
color_blend::ColorBlendState,
@@ -51,7 +49,8 @@ use vulkano::{
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
        GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout,
        PipelineShaderStageCreateInfo,
},
render_pass::AttachmentStoreOp,
swapchain::{
@@ -70,239 +69,117 @@ fn main() {
let event_loop = EventLoop::new();
let library = VulkanLibrary::new().unwrap();
// The first step of any Vulkan program is to create an instance.
//
// When we create an instance, we have to pass a list of extensions that we want to enable.
//
// All the window-drawing functionalities are part of non-core extensions that we need to
// enable manually. To do so, we ask `Surface` for the list of extensions required to draw to
// a window.
let required_extensions = Surface::required_extensions(&event_loop);
// Now creating the instance.
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant Vulkan implementations.
// (e.g. MoltenVK)
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
// The objective of this example is to draw a triangle on a window. To do so, we first need to
// create the window. We use the `WindowBuilder` from the `winit` crate to do that here.
//
// Before we can render to a window, we must first create a `vulkano::swapchain::Surface`
// object from it, which represents the drawable surface of a window. For that we must wrap the
// `winit::window::Window` in an `Arc`.
let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap());
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
// Choose device extensions that we're going to use. In order to present images to a surface,
// we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
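    // `khr_acceleration_structure` and `khr_ray_query` are the extensions this example is really
    // about: they let us build acceleration structures and trace rays against them from shaders.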
let mut device_extensions = DeviceExtensions {
khr_acceleration_structure: true,
khr_ray_query: true,
khr_swapchain: true,
..DeviceExtensions::empty()
};
// We then choose which physical device to use. First, we enumerate all the available physical
// devices, then apply filters to narrow them down to those that can support our needs.
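    // The `ray_query` and `acceleration_structure` features are required for ray queries;
    // acceleration structures additionally rely on `buffer_device_address`. `dynamic_rendering`
    // lets us draw without creating render pass and framebuffer objects.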
let features = Features {
acceleration_structure: true,
buffer_device_address: true,
dynamic_rendering: true,
ray_query: true,
..Features::empty()
};
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| {
// For this example, we require at least Vulkan 1.3, or a device that has the
// `khr_dynamic_rendering` extension available.
p.api_version() >= Version::V1_3 || p.supported_extensions().khr_dynamic_rendering
})
        .filter(|p| {
            // Some devices may not support the extensions or features that your application
            // needs, or may report properties and limits that are not sufficient for your
            // application. These should be filtered out here.
            p.supported_extensions().contains(&device_extensions)
        })
.filter_map(|p| {
// For each physical device, we try to find a suitable queue family that will execute
// our draw commands.
//
// Devices can provide multiple queues to run commands in parallel (for example a draw
// queue and a compute queue), similar to CPU threads. This is something you have to
            // manage manually in Vulkan. Queues of the same type belong to the same queue
// family.
//
// Here, we look for a single queue family that is suitable for our purposes. In a
// real-world application, you may want to use a separate dedicated transfer queue to
// handle data transfers in parallel with graphics operations. You may also need a
// separate queue for compute operations, if your application uses those.
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
// We select a queue family that supports graphics operations. When drawing to
// a window surface, as we do in this example, we also need to check that
// queues in this queue family are capable of presenting images to the surface.
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
// The code here searches for the first queue family that is suitable. If none is
// found, `None` is returned to `filter_map`, which disqualifies this physical
// device.
.map(|i| (p, i as u32))
})
// All the physical devices that pass the filters above are suitable for the application.
// However, not every device is equal, some are preferred over others. Now, we assign each
// physical device a score, and pick the device with the lowest ("best") score.
//
// In this example, we simply select the best-scoring device to use in the application.
// In a real-world setting, you may want to use the best-scoring device only as a "default"
// or "recommended" device, and let the user choose the device themself.
        .min_by_key(|(p, _)| {
            // We assign a lower score to device types that are likely to be faster/better.
            match p.properties().device_type {
                PhysicalDeviceType::DiscreteGpu => 0,
                PhysicalDeviceType::IntegratedGpu => 1,
                PhysicalDeviceType::VirtualGpu => 2,
                PhysicalDeviceType::Cpu => 3,
                PhysicalDeviceType::Other => 4,
                _ => 5,
            }
        })
.expect("no suitable physical device found");
    // Some little debug info.
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type,
);
// If the selected device doesn't have Vulkan 1.3 available, then we need to enable the
// `khr_dynamic_rendering` extension manually. This extension became a core part of Vulkan
// in version 1.3 and later, so it's always available then and it does not need to be enabled.
// We can be sure that this extension will be available on the selected physical device,
// because we filtered out unsuitable devices in the device selection code above.
if physical_device.api_version() < Version::V1_3 {
device_extensions.khr_dynamic_rendering = true;
}
    // TODO: device compatibility check
device_extensions.khr_ray_query = true;
device_extensions.khr_acceleration_structure = true;
// Now initializing the device. This is probably the most important object of Vulkan.
//
// An iterator of created queues is returned by the function alongside the device.
let (device, mut queues) = Device::new(
// Which physical device to connect to.
physical_device,
DeviceCreateInfo {
// The list of queues that we are going to use. Here we only use one queue, from the
// previously chosen queue family.
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
// A list of optional features and extensions that our program needs to work correctly.
// Some parts of the Vulkan specs are optional and must be enabled manually at device
// creation. In this example the only things we are going to need are the
// `khr_swapchain` extension that allows us to draw to a window, and
// `khr_dynamic_rendering` if we don't have Vulkan 1.3 available.
enabled_extensions: device_extensions,
// In order to render with Vulkan 1.3's dynamic rendering, we need to enable it here.
// Otherwise, we are only allowed to render with a render pass object, as in the
// standard triangle example. The feature is required to be supported by the device if
// it supports Vulkan 1.3 and higher, or if the `khr_dynamic_rendering` extension is
// available, so we don't need to check for support.
            enabled_features: features,
..Default::default()
},
)
.unwrap();
// Since we can request multiple queues, the `queues` variable is in fact an iterator. We only
// use one queue in this example, so we just retrieve the first and only element of the
// iterator.
let queue = queues.next().unwrap();
// Before we can draw on the surface, we have to create what is called a swapchain. Creating a
// swapchain allocates the color buffers that will contain the image that will ultimately be
// visible on the screen. These images are returned alongside the swapchain.
let (mut swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only pass
// values that are allowed by the capabilities.
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
// Choosing the internal format that the images will have.
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
// Please take a look at the docs for the meaning of the parameters we didn't mention.
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
                // Some drivers report a `min_image_count` of 1, but fullscreen mode requires at
// least 2. Therefore we must ensure the count is at least 2, otherwise the program
// would crash when entering fullscreen mode on those drivers.
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
// The size of the window, only used to initially setup the swapchain.
//
// NOTE:
// On some drivers the swapchain extent is specified by
// `surface_capabilities.current_extent` and the swapchain size must use this
// extent. This extent is always the same as the window size.
//
// However, other drivers don't specify a value, i.e.
// `surface_capabilities.current_extent` is `None`. These drivers will allow
// anything, but the only sensible value is the window size.
//
// Both of these cases need the swapchain to use the window size, so we just
// use that.
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
// The alpha mode indicates how the alpha value of the final image will behave. For
// example, you can choose whether the window will be opaque or transparent.
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
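    // This general-purpose allocator is reused below for every buffer and acceleration structure
    // that the example creates.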
// We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here
// to force rustc to use a defined layout for our data, as the default representation has *no
// guarantees*.
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct Vertex {
@@ -344,22 +221,8 @@ fn main() {
},
quad,
)
.unwrap();
// The next step is to create the shaders.
//
// The raw shader creation API provided by the vulkano library is unsafe for various reasons,
    // so the `shader!` macro provides a way to generate a Rust module from GLSL source - in the
// example below, the source is provided as a string input directly to the shader, but a path
// to a source file can be provided as well. Note that the user must specify the type of shader
// (e.g. "vertex", "fragment", etc.) using the `ty` option of the macro.
//
// The items generated by the `shader!` macro include a `load` function which loads the shader
// using an input logical device. The module also includes type definitions for layout
// structures defined in the shader source, for example uniforms and push constants.
//
// A more detailed overview of what the `shader!` macro generates can be found in the
// vulkano-shaders crate docs. You can view them at https://docs.rs/vulkano-shaders/
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
@@ -420,20 +283,7 @@ fn main() {
}
}
// At this point, OpenGL initialization would be finished. However in Vulkan it is not. OpenGL
// implicitly does a lot of computation whenever you draw. In Vulkan, you have to do all this
// manually.
// Before we draw, we have to create what is called a **pipeline**. A pipeline describes how
// a GPU operation is to be performed. It is similar to an OpenGL program, but it also contains
// many settings for customization, all baked into a single object. For drawing, we create
// a **graphics** pipeline, but there are also other types of pipeline.
let pipeline = {
// First, we load the shaders that the pipeline will use:
// the vertex shader and the fragment shader.
//
// A Vulkan shader can in theory contain multiple entry points, so we have to specify which
// one.
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
@@ -442,73 +292,35 @@ fn main() {
.unwrap()
.entry_point("main")
.unwrap();
// Automatically generate a vertex input state from the vertex shader's input interface,
// that takes a single vertex buffer containing `Vertex` structs.
let vertex_input_state = Vertex::per_vertex()
.definition(&vs.info().input_interface)
.unwrap();
// Make a list of the shader stages that the pipeline will have.
let stages = [
PipelineShaderStageCreateInfo::new(vs),
PipelineShaderStageCreateInfo::new(fs),
];
// We must now create a **pipeline layout** object, which describes the locations and types of
// descriptor sets and push constants used by the shaders in the pipeline.
//
// Multiple pipelines can share a common layout object, which is more efficient.
// The shaders in a pipeline must use a subset of the resources described in its pipeline
// layout, but the pipeline layout is allowed to contain resources that are not present in the
// shaders; they can be used by shaders in other pipelines that share the same layout.
// Thus, it is a good idea to design shaders so that many pipelines have common resource
// locations, which allows them to share pipeline layouts.
let layout = PipelineLayout::new(
device.clone(),
// Since we only have one pipeline in this example, and thus one pipeline layout,
// we automatically generate the creation info for it from the resources used in the
// shaders. In a real application, you would specify this information manually so that you
// can re-use one layout in multiple pipelines.
PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
.into_pipeline_layout_create_info(device.clone())
.unwrap(),
)
.unwrap();
// We describe the formats of attachment images where the colors, depth and/or stencil
// information will be written. The pipeline will only be usable with this particular
// configuration of the attachment images.
let subpass = PipelineRenderingCreateInfo {
// We specify a single color attachment that will be rendered to. When we begin
// rendering, we will specify a swapchain image to be used as this attachment, so here
// we set its format to be the same format as the swapchain.
color_attachment_formats: vec![Some(swapchain.image_format())],
..Default::default()
};
// Finally, create the pipeline.
GraphicsPipeline::new(
device.clone(),
None,
GraphicsPipelineCreateInfo {
stages: stages.into_iter().collect(),
// How vertex data is read from the vertex buffers into the vertex shader.
vertex_input_state: Some(vertex_input_state),
// How vertices are arranged into primitive shapes.
// The default primitive shape is a triangle.
input_assembly_state: Some(InputAssemblyState::default()),
// How primitives are transformed and clipped to fit the framebuffer.
// We use a resizable viewport, set to draw over the entire window.
viewport_state: Some(ViewportState::viewport_dynamic_scissor_irrelevant()),
// How polygons are culled and converted into a raster of pixels.
// The default value does not perform any culling.
rasterization_state: Some(RasterizationState::default()),
// How multiple fragment shader samples are converted to a single pixel value.
// The default value does not perform any multisampling.
multisample_state: Some(MultisampleState::default()),
// How pixel values are combined with the values already present in the framebuffer.
// The default value overwrites the old value with the new one, without any blending.
color_blend_state: Some(ColorBlendState::new(
subpass.color_attachment_formats.len() as u32,
)),
@@ -516,36 +328,20 @@ fn main() {
..GraphicsPipelineCreateInfo::layout(layout)
},
)
.unwrap()
};
// Dynamic viewports allow us to recreate just the viewport when the window is resized.
// Otherwise we would have to recreate the whole pipeline.
let mut viewport = Viewport {
offset: [0.0, 0.0],
extent: [0.0, 0.0],
depth_range: 0.0..=1.0,
};
// When creating the swapchain, we only created plain images. To use them as an attachment for
    // rendering, we must wrap them in an image view.
//
// Since we need to draw to multiple images, we are going to create a different image view for
// each image.
let mut attachment_image_views = window_size_dependent_setup(&images, &mut viewport);
// Before we can start creating and recording command buffers, we need a way of allocating
// them. Vulkano provides a command buffer allocator, which manages raw Vulkan command pools
// underneath and provides a safe interface for them.
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
// Keep the bottom-level acceleration structure alive
// because it is referenced by the top-level acceleration structure.
    let (top_level_acceleration_structure, bottom_level_acceleration_structure) = {
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct Vertex {
@@ -579,7 +375,7 @@ fn main() {
},
vertices,
)
.unwrap();
let bottom_level_acceleration_structure = create_bottom_level_acceleration_structure(
&memory_allocator,
@@ -605,205 +401,129 @@ fn main() {
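    // Bind the top-level acceleration structure at binding 0 so that the shaders can trace rays
    // against the scene with a ray query.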
let descriptor_set = PersistentDescriptorSet::new(
&descriptor_set_allocator,
pipeline.layout().set_layouts().get(0).unwrap().clone(),
        [WriteDescriptorSet::acceleration_structure(
            0,
            top_level_acceleration_structure,
        )],
[],
)
.unwrap();
// Initialization is finally finished!
// In some situations, the swapchain will become invalid by itself. This includes for example
// when the window is resized (as the images of the swapchain will no longer match the
// window's) or, on Android, when the application went to the background and goes back to the
// foreground.
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not work.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain. Here,
// we remember that we need to do this for the next loop iteration.
let mut recreate_swapchain = false;
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
// an object that implements the `GpuFuture` trait, which holds the resources for as long as
// they are in use by the GPU.
//
// Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
// that, we store the submission of the previous frame here.
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
    event_loop.run(move |event, _, control_flow| match event {
        Event::WindowEvent {
            event: WindowEvent::CloseRequested,
            ..
        } => {
            *control_flow = ControlFlow::Exit;
        }
        Event::WindowEvent {
            event: WindowEvent::Resized(_),
            ..
        } => {
            recreate_swapchain = true;
        }
        Event::RedrawEventsCleared => {
            // Do not draw the frame when the screen size is zero. On Windows, this can
            // occur when minimizing the application.
            let image_extent: [u32; 2] = window.inner_size().into();
            if image_extent.contains(&0) {
                return;
            }
            // It is important to call this function from time to time, otherwise resources
            // will keep accumulating and you will eventually reach an out of memory error.
            // Calling this function polls various fences in order to determine what the GPU
            // has already processed, and frees the resources that are no longer needed.
            previous_frame_end.as_mut().unwrap().cleanup_finished();
            // Whenever the window resizes we need to recreate everything dependent on the
            // window size. In this example that includes the swapchain, the framebuffers and
            // the dynamic state viewport.
            if recreate_swapchain {
                let (new_swapchain, new_images) = swapchain
                    .recreate(SwapchainCreateInfo {
                        image_extent,
                        ..swapchain.create_info()
                    })
                    .expect("failed to recreate swapchain");
                swapchain = new_swapchain;
                // Now that we have new swapchain images, we must create new image views from
                // them as well.
                attachment_image_views = window_size_dependent_setup(&new_images, &mut viewport);
                recreate_swapchain = false;
            }
            // Before we can draw on the output, we have to *acquire* an image from the
            // swapchain. If no image is available (which happens if you submit draw commands
            // too quickly), then the function will block. This operation returns the index of
            // the image that we are allowed to draw upon.
            //
            // This function can block if no image is available. The parameter is an optional
            // timeout after which the function call will return an error.
            let (image_index, suboptimal, acquire_future) =
                match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) {
                    Ok(r) => r,
                    Err(VulkanError::OutOfDate) => {
                        recreate_swapchain = true;
                        return;
                    }
                    Err(e) => panic!("failed to acquire next image: {e}"),
                };
            // `acquire_next_image` can be successful, but suboptimal. This means that the
            // swapchain image will still work, but it may not display correctly. With some
            // drivers this can be when the window resizes, but it may not cause the swapchain
            // to become out of date.
            if suboptimal {
                recreate_swapchain = true;
            }
            // In order to draw, we have to build a *command buffer*. The command buffer object
            // holds the list of commands that are going to be executed.
            //
            // Building a command buffer is an expensive operation (usually a few hundred
            // microseconds), but it is known to be a hot path in the driver and is expected to
            // be optimized.
            //
            // Note that we have to pass a queue family when we create the command buffer. The
            // command buffer will only be executable on that given queue family.
            let mut builder = AutoCommandBufferBuilder::primary(
                &command_buffer_allocator,
                queue.queue_family_index(),
                CommandBufferUsage::OneTimeSubmit,
            )
            .unwrap();
            builder
                // Before we can draw, we have to *enter a render pass*. We specify which
                // attachments we are going to use for rendering here, which needs to match
                // what was previously specified when creating the pipeline.
                .begin_rendering(RenderingInfo {
                    // As before, we specify one color attachment, but now we specify the image
                    // view to use as well as how it should be used.
                    color_attachments: vec![Some(RenderingAttachmentInfo {
                        // `Store` means that we ask the GPU to store the rendered output in
                        // the attachment image. We could also ask it to discard the result.
                        store_op: AttachmentStoreOp::Store,
                        ..RenderingAttachmentInfo::image_view(
                            // We specify the image view corresponding to the currently acquired
                            // swapchain image, to use for this attachment.
                            attachment_image_views[image_index as usize].clone(),
                        )
                    })],
                    ..Default::default()
                })
                .unwrap()
                // We are now inside the first subpass of the render pass.
                //
                // TODO: Document state setting and how it affects subsequent draw commands.
                .set_viewport(0, [viewport.clone()].into_iter().collect())
                .unwrap()
                .bind_pipeline_graphics(pipeline.clone())
                .unwrap()
                .bind_vertex_buffers(0, quad_buffer.clone())
                .unwrap()
                .bind_descriptor_sets(
                    PipelineBindPoint::Graphics,
                    pipeline.layout().clone(),
                    0,
                    descriptor_set.clone(),
                )
                .unwrap()
                // We add a draw command.
                .draw(quad_buffer.len() as u32, 1, 0, 0)
                .unwrap()
                // We leave the render pass.
                .end_rendering()
                .unwrap();
            // Finish building the command buffer by calling `build`.
            let command_buffer = builder.build().unwrap();
            let future = previous_frame_end
                .take()
                .unwrap()
                .join(acquire_future)
                .then_execute(queue.clone(), command_buffer)
                .unwrap()
                // The color output is now expected to contain our triangle. But in order to
                // show it on the screen, we have to *present* the image by calling
                // `then_swapchain_present`.
                //
                // This function does not actually present the image immediately. Instead it
                // submits a present command at the end of the queue. This means that it will
                // only be presented once the GPU has finished executing the command buffer
                // that draws the triangle.
                .then_swapchain_present(
                    queue.clone(),
                    SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index),
                )
                .then_signal_fence_and_flush();
            match future.map_err(Validated::unwrap) {
                Ok(future) => {
                    previous_frame_end = Some(future.boxed());
                }
                Err(VulkanError::OutOfDate) => {
                    recreate_swapchain = true;
                    previous_frame_end = Some(sync::now(device.clone()).boxed());
                }
                Err(e) => {
                    println!("failed to flush future: {e}");
                    previous_frame_end = Some(sync::now(device.clone()).boxed());
                }
            }
        }
        _ => (),
    });
}
@@ -829,15 +549,17 @@ fn create_top_level_acceleration_structure(
) -> Arc<AccelerationStructure> {
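    // The top-level acceleration structure holds one instance per bottom-level acceleration
    // structure. Each instance stores the device address of the structure it references, which is
    // why the bottom-level structures must be kept alive for as long as this one is used.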
let instances = bottom_level_acceleration_structures
.iter()
        .map(
            |&bottom_level_acceleration_structure| AccelerationStructureInstance {
                instance_shader_binding_table_record_offset_and_flags: Packed24_8::new(
                    0,
                    GeometryInstanceFlags::TRIANGLE_FACING_CULL_DISABLE.into(),
                ),
                acceleration_structure_reference: bottom_level_acceleration_structure
                    .device_address()
                    .get(),
                ..Default::default()
            },
)
.collect::<Vec<_>>();
@@ -855,14 +577,15 @@ fn create_top_level_acceleration_structure(
},
instances,
)
.unwrap();
    let geometries =
        AccelerationStructureGeometries::Instances(AccelerationStructureGeometryInstancesData {
            flags: GeometryFlags::OPAQUE,
            ..AccelerationStructureGeometryInstancesData::new(
                AccelerationStructureGeometryInstancesDataType::Values(Some(values)),
            )
        });
let build_info = AccelerationStructureBuildGeometryInfo {
flags: BuildAccelerationStructureFlags::PREFER_FAST_TRACE,
@@ -870,14 +593,12 @@ fn create_top_level_acceleration_structure(
..AccelerationStructureBuildGeometryInfo::new(geometries)
};
    let build_range_infos = [AccelerationStructureBuildRangeInfo {
        primitive_count: bottom_level_acceleration_structures.len() as _,
        primitive_offset: 0,
        first_vertex: 0,
        transform_offset: 0,
    }];
build_acceleration_structure(
memory_allocator,
@@ -906,28 +627,24 @@ fn create_bottom_level_acceleration_structure<T: BufferContents + Vertex>(
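    // One triangle geometry is created per vertex buffer. Vertices are consumed three at a time,
    // so the primitive count is a third of the vertex count.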
for &vertex_buffer in vertex_buffers {
let primitive_count = vertex_buffer.len() as u32 / 3;
        triangles.push(AccelerationStructureGeometryTrianglesData {
            flags: GeometryFlags::OPAQUE,
            vertex_data: Some(vertex_buffer.clone().into_bytes()),
            vertex_stride: description.stride,
            max_vertex: vertex_buffer.len() as _,
            index_data: None,
            transform_data: None,
            ..AccelerationStructureGeometryTrianglesData::new(
                description.members.get("position").unwrap().format,
            )
        });
max_primitive_counts.push(primitive_count);
        build_range_infos.push(AccelerationStructureBuildRangeInfo {
            primitive_count,
            primitive_offset: 0,
            first_vertex: 0,
            transform_offset: 0,
        })
}
let geometries = AccelerationStructureGeometries::Triangles(triangles);
@@ -965,7 +682,7 @@ fn create_acceleration_structure(
},
size,
)
.unwrap();
unsafe {
AccelerationStructure::new(
@@ -974,7 +691,8 @@ fn create_acceleration_structure(
ty,
..AccelerationStructureCreateInfo::new(buffer)
},
        )
        .unwrap()
}
}
@@ -994,7 +712,7 @@ fn create_scratch_buffer(
},
size,
)
.unwrap()
}
fn build_acceleration_structure(
@@ -1004,7 +722,7 @@ fn build_acceleration_structure(
ty: AccelerationStructureType,
mut build_info: AccelerationStructureBuildGeometryInfo,
max_primitive_counts: &[u32],
    build_range_infos: impl IntoIterator<Item = AccelerationStructureBuildRangeInfo>,
) -> Arc<AccelerationStructure> {
let device = memory_allocator.device();
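    // Building is a three-step process: query the sizes required for this geometry, allocate an
    // acceleration structure and a scratch buffer of those sizes, then record and submit a
    // one-time command buffer that performs the build on the device.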
@@ -1012,21 +730,17 @@ fn build_acceleration_structure(
acceleration_structure_size,
build_scratch_size,
..
    } = device
        .acceleration_structure_build_sizes(
            AccelerationStructureBuildType::Device,
            &build_info,
            max_primitive_counts,
        )
        .unwrap();
    let acceleration_structure =
        create_acceleration_structure(memory_allocator, ty, acceleration_structure_size);
    let scratch_buffer = create_scratch_buffer(memory_allocator, build_scratch_size);
build_info.dst_acceleration_structure = Some(acceleration_structure.clone());
build_info.scratch_data = Some(scratch_buffer);
@@ -1036,13 +750,11 @@ fn build_acceleration_structure(
queue.queue_family_index(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
unsafe {
        builder
            .build_acceleration_structure(build_info, build_range_infos.into_iter().collect())
.unwrap();
}
@@ -1056,4 +768,4 @@ fn build_acceleration_structure(
.unwrap();
acceleration_structure
}