From 10d734955633aad8fe816d5cd12e6f3728749539 Mon Sep 17 00:00:00 2001 From: Rua Date: Wed, 7 Dec 2022 11:06:06 +0100 Subject: [PATCH] Add basic synchronization tracking to `CommandBufferBuilder` (#2099) --- vulkano/Cargo.toml | 1 + vulkano/src/command_buffer/mod.rs | 1 + .../standard/builder/bind_push.rs | 30 +- .../command_buffer/standard/builder/clear.rs | 115 +- .../command_buffer/standard/builder/copy.rs | 343 ++++- .../command_buffer/standard/builder/debug.rs | 3 + .../standard/builder/dynamic_state.rs | 102 +- .../command_buffer/standard/builder/mod.rs | 472 +++++- .../standard/builder/pipeline.rs | 541 ++++++- .../command_buffer/standard/builder/query.rs | 87 +- .../standard/builder/render_pass.rs | 34 +- .../standard/builder/secondary.rs | 14 +- .../command_buffer/standard/builder/sync.rs | 65 +- vulkano/src/image/layout.rs | 7 + vulkano/src/sync/mod.rs | 1 + vulkano/src/sync/pipeline.rs | 1349 ++++++++++++++++- 16 files changed, 2898 insertions(+), 267 deletions(-) diff --git a/vulkano/Cargo.toml b/vulkano/Cargo.toml index 28cf3ca5..df039f3b 100644 --- a/vulkano/Cargo.toml +++ b/vulkano/Cargo.toml @@ -23,6 +23,7 @@ crossbeam-queue = "0.3" half = "2" libloading = "0.7" nalgebra = { version = "0.31.0", optional = true } +once_cell = "1.16" parking_lot = { version = "0.12", features = ["send_guard"] } smallvec = "1.8" thread_local = "1.1" diff --git a/vulkano/src/command_buffer/mod.rs b/vulkano/src/command_buffer/mod.rs index c27eea12..e454e9c9 100644 --- a/vulkano/src/command_buffer/mod.rs +++ b/vulkano/src/command_buffer/mod.rs @@ -597,6 +597,7 @@ pub enum ResourceInCommand { DescriptorSet { set: u32, binding: u32, index: u32 }, Destination, FramebufferAttachment { index: u32 }, + ImageMemoryBarrier { index: u32 }, IndexBuffer, IndirectBuffer, SecondaryCommandBuffer { index: u32 }, diff --git a/vulkano/src/command_buffer/standard/builder/bind_push.rs b/vulkano/src/command_buffer/standard/builder/bind_push.rs index 5715aea6..26a27a8a 100644 --- 
a/vulkano/src/command_buffer/standard/builder/bind_push.rs +++ b/vulkano/src/command_buffer/standard/builder/bind_push.rs @@ -166,7 +166,7 @@ where dynamic_offsets.as_ptr(), ); - let state = self.current_state.invalidate_descriptor_sets( + let state = self.builder_state.invalidate_descriptor_sets( pipeline_bind_point, pipeline_layout.clone(), first_set, @@ -189,6 +189,7 @@ where self.resources.push(Box::new(pipeline_layout)); + self.next_command_index += 1; self } @@ -271,9 +272,10 @@ where index_type.into(), ); - self.current_state.index_buffer = Some((buffer.clone(), index_type)); + self.builder_state.index_buffer = Some((buffer.clone(), index_type)); self.resources.push(Box::new(buffer)); + self.next_command_index += 1; self } @@ -321,9 +323,10 @@ where pipeline.handle(), ); - self.current_state.pipeline_compute = Some(pipeline.clone()); + self.builder_state.pipeline_compute = Some(pipeline.clone()); self.resources.push(Box::new(pipeline)); + self.next_command_index += 1; self } @@ -357,12 +360,12 @@ where assert_eq!(self.device(), pipeline.device()); if let Some(last_pipeline) = - self.current_state + self.builder_state .render_pass .as_ref() .and_then(|render_pass_state| match &render_pass_state.render_pass { RenderPassStateType::BeginRendering(state) if state.pipeline_used => { - self.current_state.pipeline_graphics.as_ref() + self.builder_state.pipeline_graphics.as_ref() } _ => None, }) @@ -416,15 +419,16 @@ where // Reset any states that are fixed in the new pipeline. The pipeline bind command will // overwrite these states. 
- self.current_state.reset_dynamic_states( + self.builder_state.reset_dynamic_states( pipeline .dynamic_states() .filter(|(_, d)| !d) // not dynamic .map(|(s, _)| s), ); - self.current_state.pipeline_graphics = Some(pipeline.clone()); + self.builder_state.pipeline_graphics = Some(pipeline.clone()); self.resources.push(Box::new(pipeline)); + self.next_command_index += 1; self } @@ -532,12 +536,13 @@ where self.resources.reserve(buffers.len()); for (i, buffer) in buffers.into_iter().enumerate() { - self.current_state + self.builder_state .vertex_buffers .insert(first_binding + i as u32, buffer.clone()); self.resources.push(Box::new(buffer)); } + self.next_command_index += 1; self } @@ -667,13 +672,13 @@ where // push constants as set, and never unsets them. See: // https://github.com/KhronosGroup/Vulkan-Docs/issues/1485 // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2711 - self.current_state + self.builder_state .push_constants .insert(offset..offset + push_constants.len() as u32); - self.current_state.push_constants_pipeline_layout = Some(pipeline_layout.clone()); - + self.builder_state.push_constants_pipeline_layout = Some(pipeline_layout.clone()); self.resources.push(Box::new(pipeline_layout)); + self.next_command_index += 1; self } @@ -841,7 +846,7 @@ where writes.as_ptr(), ); - let state = self.current_state.invalidate_descriptor_sets( + let state = self.builder_state.invalidate_descriptor_sets( pipeline_bind_point, pipeline_layout.clone(), set_num, @@ -863,6 +868,7 @@ where self.resources.push(Box::new(pipeline_layout)); + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/clear.rs b/vulkano/src/command_buffer/standard/builder/clear.rs index c4ab73ff..f0c8db35 100644 --- a/vulkano/src/command_buffer/standard/builder/clear.rs +++ b/vulkano/src/command_buffer/standard/builder/clear.rs @@ -13,10 +13,11 @@ use super::{ }; use crate::{ buffer::{BufferAccess, BufferContents, BufferUsage, 
TypedBufferAccess}, - command_buffer::allocator::CommandBufferAllocator, + command_buffer::{allocator::CommandBufferAllocator, ResourceInCommand, ResourceUseRef}, device::{DeviceOwned, QueueFlags}, format::FormatFeatures, image::{ImageAccess, ImageAspects, ImageLayout, ImageUsage}, + sync::PipelineStageAccess, DeviceSize, RequiresOneOf, Version, VulkanObject, }; use smallvec::SmallVec; @@ -50,7 +51,7 @@ where let device = self.device(); // VUID-vkCmdClearColorImage-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(ClearError::ForbiddenInsideRenderPass); } @@ -193,6 +194,7 @@ where return self; } + let image_inner = image.inner(); let clear_value = clear_value.into(); let ranges: SmallVec<[_; 8]> = regions .iter() @@ -203,17 +205,40 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_clear_color_image)( self.handle(), - image.inner().image.handle(), + image_inner.image.handle(), image_layout.into(), &clear_value, ranges.len() as u32, ranges.as_ptr(), ); + let command_index = self.next_command_index; + let command_name = "clear_color_image"; + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for mut subresource_range in regions { + subresource_range.array_layers.start += image_inner.first_layer; + subresource_range.array_layers.end += image_inner.first_layer; + subresource_range.mip_levels.start += image_inner.first_mipmap_level; + subresource_range.mip_levels.end += image_inner.first_mipmap_level; + + self.resources_usage_state.record_image_access( + &use_ref, + image_inner.image, + subresource_range, + PipelineStageAccess::Clear_TransferWrite, + image_layout, + ); + } + self.resources.push(Box::new(image)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -241,7 +266,7 @@ where let device = self.device(); // VUID-vkCmdClearDepthStencilImage-renderpass - if 
self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(ClearError::ForbiddenInsideRenderPass); } @@ -402,6 +427,7 @@ where return self; } + let image_inner = image.inner(); let clear_value = clear_value.into(); let ranges: SmallVec<[_; 8]> = regions .iter() @@ -412,17 +438,40 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_clear_depth_stencil_image)( self.handle(), - image.inner().image.handle(), + image_inner.image.handle(), image_layout.into(), &clear_value, ranges.len() as u32, ranges.as_ptr(), ); + let command_index = self.next_command_index; + let command_name = "clear_depth_stencil_image"; + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for mut subresource_range in regions { + subresource_range.array_layers.start += image_inner.first_layer; + subresource_range.array_layers.end += image_inner.first_layer; + subresource_range.mip_levels.start += image_inner.first_mipmap_level; + subresource_range.mip_levels.end += image_inner.first_mipmap_level; + + self.resources_usage_state.record_image_access( + &use_ref, + image_inner.image, + subresource_range, + PipelineStageAccess::Clear_TransferWrite, + image_layout, + ); + } + self.resources.push(Box::new(image)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -453,7 +502,7 @@ where let device = self.device(); // VUID-vkCmdFillBuffer-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(ClearError::ForbiddenInsideRenderPass); } @@ -554,10 +603,28 @@ where data, ); + let command_index = self.next_command_index; + let command_name = "fill_buffer"; + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + let mut dst_range = dst_offset..dst_offset + size; + dst_range.start += 
dst_buffer_inner.offset; + dst_range.end += dst_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &use_ref, + dst_buffer_inner.buffer, + dst_range, + PipelineStageAccess::Clear_TransferWrite, + ); + self.resources.push(Box::new(dst_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -596,7 +663,7 @@ where let device = self.device(); // VUID-vkCmdUpdateBuffer-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(ClearError::ForbiddenInsideRenderPass); } @@ -684,10 +751,28 @@ where data.as_bytes().as_ptr() as *const _, ); + let command_index = self.next_command_index; + let command_name = "update_buffer"; + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + let mut dst_range = dst_offset..dst_offset + size_of_val(data) as DeviceSize; + dst_range.start += dst_buffer_inner.offset; + dst_range.end += dst_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &use_ref, + dst_buffer_inner.buffer, + dst_range, + PipelineStageAccess::Clear_TransferWrite, + ); + self.resources.push(Box::new(dst_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/copy.rs b/vulkano/src/command_buffer/standard/builder/copy.rs index a323c8e9..cd758591 100644 --- a/vulkano/src/command_buffer/standard/builder/copy.rs +++ b/vulkano/src/command_buffer/standard/builder/copy.rs @@ -14,14 +14,18 @@ use super::{ }; use crate::{ buffer::{BufferAccess, BufferUsage}, - command_buffer::{allocator::CommandBufferAllocator, ImageBlit, ImageResolve}, + command_buffer::{ + allocator::CommandBufferAllocator, ImageBlit, ImageResolve, ResourceInCommand, + ResourceUseRef, + }, device::{DeviceOwned, QueueFlags}, format::{Format, FormatFeatures, NumericType}, image::{ - ImageAccess, 
ImageAspects, ImageDimensions, ImageLayout, ImageSubresourceLayers, ImageType, - ImageUsage, SampleCount, SampleCounts, + ImageAccess, ImageAspects, ImageDimensions, ImageLayout, ImageSubresourceLayers, + ImageSubresourceRange, ImageType, ImageUsage, SampleCount, SampleCounts, }, sampler::Filter, + sync::PipelineStageAccess, DeviceSize, Version, VulkanObject, }; use smallvec::SmallVec; @@ -57,7 +61,7 @@ where let device = self.device(); // VUID-vkCmdCopyBuffer2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(CopyError::ForbiddenInsideRenderPass); } @@ -257,11 +261,54 @@ where ); } + let command_index = self.next_command_index; + let command_name = "copy_buffer"; + let src_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Source, + secondary_use_ref: None, + }; + let dst_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for region in regions { + let BufferCopy { + src_offset, + dst_offset, + size, + _ne: _, + } = region; + + let mut src_range = src_offset..src_offset + size; + src_range.start += src_buffer_inner.offset; + src_range.end += src_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &src_use_ref, + src_buffer_inner.buffer, + src_range, + PipelineStageAccess::Copy_TransferRead, + ); + + let mut dst_range = dst_offset..dst_offset + size; + dst_range.start += dst_buffer_inner.offset; + dst_range.end += dst_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &dst_use_ref, + dst_buffer_inner.buffer, + dst_range, + PipelineStageAccess::Copy_TransferWrite, + ); + } + self.resources.push(Box::new(src_buffer)); self.resources.push(Box::new(dst_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -305,7 +352,7 @@ where let device = self.device(); // 
VUID-vkCmdCopyImage2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(CopyError::ForbiddenInsideRenderPass); } @@ -1127,11 +1174,62 @@ where ); } + let command_index = self.next_command_index; + let command_name = "copy_image"; + let src_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Source, + secondary_use_ref: None, + }; + let dst_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for region in regions { + let ImageCopy { + src_subresource, + src_offset: _, + dst_subresource, + dst_offset: _, + extent: _, + _ne: _, + } = region; + + let mut src_subresource_range = ImageSubresourceRange::from(src_subresource); + src_subresource_range.array_layers.start += src_image_inner.first_layer; + src_subresource_range.array_layers.end += src_image_inner.first_layer; + src_subresource_range.mip_levels.start += src_image_inner.first_mipmap_level; + src_subresource_range.mip_levels.end += src_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &src_use_ref, + src_image_inner.image, + src_subresource_range, + PipelineStageAccess::Copy_TransferRead, + src_image_layout, + ); + + let mut dst_subresource_range = ImageSubresourceRange::from(dst_subresource); + dst_subresource_range.array_layers.start += dst_image_inner.first_layer; + dst_subresource_range.array_layers.end += dst_image_inner.first_layer; + dst_subresource_range.mip_levels.start += dst_image_inner.first_mipmap_level; + dst_subresource_range.mip_levels.end += dst_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &dst_use_ref, + dst_image_inner.image, + dst_subresource_range, + PipelineStageAccess::Copy_TransferWrite, + dst_image_layout, + ); + } + self.resources.push(Box::new(src_image)); self.resources.push(Box::new(dst_image)); - // 
TODO: sync state update - + self.next_command_index += 1; self } @@ -1159,7 +1257,7 @@ where let device = self.device(); // VUID-vkCmdCopyBufferToImage2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(CopyError::ForbiddenInsideRenderPass); } @@ -1705,11 +1803,62 @@ where ); } + let command_index = self.next_command_index; + let command_name = "copy_buffer_to_image"; + let src_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Source, + secondary_use_ref: None, + }; + let dst_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for region in regions { + let buffer_copy_size = region.buffer_copy_size(dst_image.format()); + + let BufferImageCopy { + buffer_offset, + buffer_row_length: _, + buffer_image_height: _, + image_subresource, + image_offset: _, + image_extent: _, + _ne: _, + } = region; + + let mut src_range = buffer_offset..buffer_offset + buffer_copy_size; + src_range.start += src_buffer_inner.offset; + src_range.end += src_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &src_use_ref, + src_buffer_inner.buffer, + src_range, + PipelineStageAccess::Copy_TransferRead, + ); + + let mut dst_subresource_range = ImageSubresourceRange::from(image_subresource); + dst_subresource_range.array_layers.start += dst_image_inner.first_layer; + dst_subresource_range.array_layers.end += dst_image_inner.first_layer; + dst_subresource_range.mip_levels.start += dst_image_inner.first_mipmap_level; + dst_subresource_range.mip_levels.end += dst_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &dst_use_ref, + dst_image_inner.image, + dst_subresource_range, + PipelineStageAccess::Copy_TransferWrite, + dst_image_layout, + ); + } + self.resources.push(Box::new(src_buffer)); 
self.resources.push(Box::new(dst_image)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -1737,7 +1886,7 @@ where let device = self.device(); // VUID-vkCmdCopyImageToBuffer2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(CopyError::ForbiddenInsideRenderPass); } @@ -2273,11 +2422,62 @@ where ); } + let command_index = self.next_command_index; + let command_name = "copy_image_to_buffer"; + let src_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Source, + secondary_use_ref: None, + }; + let dst_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for region in regions { + let buffer_copy_size = region.buffer_copy_size(src_image.format()); + + let BufferImageCopy { + buffer_offset, + buffer_row_length: _, + buffer_image_height: _, + image_subresource, + image_offset: _, + image_extent: _, + _ne: _, + } = region; + + let mut src_subresource_range = ImageSubresourceRange::from(image_subresource); + src_subresource_range.array_layers.start += src_image_inner.first_layer; + src_subresource_range.array_layers.end += src_image_inner.first_layer; + src_subresource_range.mip_levels.start += src_image_inner.first_mipmap_level; + src_subresource_range.mip_levels.end += src_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &src_use_ref, + src_image_inner.image, + src_subresource_range, + PipelineStageAccess::Copy_TransferRead, + src_image_layout, + ); + + let mut dst_range = buffer_offset..buffer_offset + buffer_copy_size; + dst_range.start += dst_buffer_inner.offset; + dst_range.end += dst_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &dst_use_ref, + dst_buffer_inner.buffer, + dst_range, + PipelineStageAccess::Copy_TransferWrite, + ); + } + 
self.resources.push(Box::new(src_image)); self.resources.push(Box::new(dst_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -2331,7 +2531,7 @@ where let device = self.device(); // VUID-vkCmdBlitImage2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(CopyError::ForbiddenInsideRenderPass); } @@ -2983,11 +3183,61 @@ where ); } + let command_index = self.next_command_index; + let command_name = "blit_image"; + let src_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Source, + secondary_use_ref: None, + }; + let dst_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for region in regions { + let ImageBlit { + src_subresource, + src_offsets: _, + dst_subresource, + dst_offsets: _, + _ne: _, + } = region; + + let mut src_subresource_range = ImageSubresourceRange::from(src_subresource); + src_subresource_range.array_layers.start += src_image_inner.first_layer; + src_subresource_range.array_layers.end += src_image_inner.first_layer; + src_subresource_range.mip_levels.start += src_image_inner.first_mipmap_level; + src_subresource_range.mip_levels.end += src_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &src_use_ref, + src_image_inner.image, + src_subresource_range, + PipelineStageAccess::Blit_TransferRead, + src_image_layout, + ); + + let mut dst_subresource_range = ImageSubresourceRange::from(dst_subresource); + dst_subresource_range.array_layers.start += dst_image_inner.first_layer; + dst_subresource_range.array_layers.end += dst_image_inner.first_layer; + dst_subresource_range.mip_levels.start += dst_image_inner.first_mipmap_level; + dst_subresource_range.mip_levels.end += dst_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &dst_use_ref, + 
dst_image_inner.image, + dst_subresource_range, + PipelineStageAccess::Blit_TransferWrite, + dst_image_layout, + ); + } + self.resources.push(Box::new(src_image)); self.resources.push(Box::new(dst_image)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -3020,7 +3270,7 @@ where let device = self.device(); // VUID-vkCmdResolveImage2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(CopyError::ForbiddenInsideRenderPass); } @@ -3412,11 +3662,62 @@ where ); } + let command_index = self.next_command_index; + let command_name = "resolve_image"; + let src_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Source, + secondary_use_ref: None, + }; + let dst_use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + for region in regions { + let ImageResolve { + src_subresource, + src_offset: _, + dst_subresource, + dst_offset: _, + extent: _, + _ne: _, + } = region; + + let mut src_subresource_range = ImageSubresourceRange::from(src_subresource); + src_subresource_range.array_layers.start += src_image_inner.first_layer; + src_subresource_range.array_layers.end += src_image_inner.first_layer; + src_subresource_range.mip_levels.start += src_image_inner.first_mipmap_level; + src_subresource_range.mip_levels.end += src_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &src_use_ref, + src_image_inner.image, + src_subresource_range, + PipelineStageAccess::Resolve_TransferRead, + src_image_layout, + ); + + let mut dst_subresource_range = ImageSubresourceRange::from(dst_subresource); + dst_subresource_range.array_layers.start += dst_image_inner.first_layer; + dst_subresource_range.array_layers.end += dst_image_inner.first_layer; + dst_subresource_range.mip_levels.start += dst_image_inner.first_mipmap_level; + 
dst_subresource_range.mip_levels.end += dst_image_inner.first_mipmap_level; + self.resources_usage_state.record_image_access( + &dst_use_ref, + dst_image_inner.image, + dst_subresource_range, + PipelineStageAccess::Resolve_TransferWrite, + dst_image_layout, + ); + } + self.resources.push(Box::new(src_image)); self.resources.push(Box::new(dst_image)); - // TODO: sync state update - + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/debug.rs b/vulkano/src/command_buffer/standard/builder/debug.rs index 5708014c..e0aa436e 100644 --- a/vulkano/src/command_buffer/standard/builder/debug.rs +++ b/vulkano/src/command_buffer/standard/builder/debug.rs @@ -84,6 +84,7 @@ where let fns = self.device().instance().fns(); (fns.ext_debug_utils.cmd_begin_debug_utils_label_ext)(self.handle(), &label_info); + self.next_command_index += 1; self } @@ -141,6 +142,7 @@ where let fns = self.device().instance().fns(); (fns.ext_debug_utils.cmd_end_debug_utils_label_ext)(self.handle()); + self.next_command_index += 1; self } @@ -208,6 +210,7 @@ where let fns = self.device().instance().fns(); (fns.ext_debug_utils.cmd_insert_debug_utils_label_ext)(self.handle(), &label_info); + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/dynamic_state.rs b/vulkano/src/command_buffer/standard/builder/dynamic_state.rs index 9b3383dd..d0f357bb 100644 --- a/vulkano/src/command_buffer/standard/builder/dynamic_state.rs +++ b/vulkano/src/command_buffer/standard/builder/dynamic_state.rs @@ -40,7 +40,7 @@ where ) -> Result<(), SetDynamicStateError> { // VUID-vkCmdDispatch-None-02859 if self - .current_state + .builder_state .pipeline_graphics .as_ref() .map_or(false, |pipeline| { @@ -90,8 +90,9 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_set_blend_constants)(self.handle(), &constants); - self.current_state.blend_constants = Some(constants); + self.builder_state.blend_constants = Some(constants); + 
self.next_command_index += 1; self } @@ -148,7 +149,7 @@ where } if let Some(color_blend_state) = self - .current_state + .builder_state .pipeline_graphics .as_ref() .and_then(|pipeline| pipeline.color_blend_state()) @@ -193,8 +194,9 @@ where enables_vk.as_ptr(), ); - self.current_state.color_write_enable = Some(enables); + self.builder_state.color_write_enable = Some(enables); + self.next_command_index += 1; self } @@ -263,8 +265,9 @@ where (fns.ext_extended_dynamic_state.cmd_set_cull_mode_ext)(self.handle(), cull_mode.into()); } - self.current_state.cull_mode = Some(cull_mode); + self.builder_state.cull_mode = Some(cull_mode); + self.next_command_index += 1; self } @@ -333,12 +336,13 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_set_depth_bias)(self.handle(), constant_factor, clamp, slope_factor); - self.current_state.depth_bias = Some(DepthBias { + self.builder_state.depth_bias = Some(DepthBias { constant_factor, clamp, slope_factor, }); + self.next_command_index += 1; self } @@ -405,8 +409,9 @@ where .cmd_set_depth_bias_enable_ext)(self.handle(), enable.into()); } - self.current_state.depth_bias_enable = Some(enable); + self.builder_state.depth_bias_enable = Some(enable); + self.next_command_index += 1; self } @@ -467,8 +472,9 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_set_depth_bounds)(self.handle(), *bounds.start(), *bounds.end()); - self.current_state.depth_bounds = Some(bounds); + self.builder_state.depth_bounds = Some(bounds); + self.next_command_index += 1; self } @@ -538,8 +544,9 @@ where .cmd_set_depth_bounds_test_enable_ext)(self.handle(), enable.into()); } - self.current_state.depth_bounds_test_enable = Some(enable); + self.builder_state.depth_bounds_test_enable = Some(enable); + self.next_command_index += 1; self } @@ -613,8 +620,9 @@ where ); } - self.current_state.depth_compare_op = Some(compare_op); + self.builder_state.depth_compare_op = Some(compare_op); + self.next_command_index += 1; self } @@ -682,8 +690,9 @@ where ); } - 
self.current_state.depth_test_enable = Some(enable); + self.builder_state.depth_test_enable = Some(enable); + self.next_command_index += 1; self } @@ -749,8 +758,9 @@ where .cmd_set_depth_write_enable_ext)(self.handle(), enable.into()); } - self.current_state.depth_write_enable = Some(enable); + self.builder_state.depth_write_enable = Some(enable); + self.next_command_index += 1; self } @@ -775,11 +785,7 @@ where self.validate_set_discard_rectangle(first_rectangle, &rectangles) .unwrap(); - unsafe { - self.set_discard_rectangle_unchecked(first_rectangle, rectangles); - } - - self + unsafe { self.set_discard_rectangle_unchecked(first_rectangle, rectangles) } } fn validate_set_discard_rectangle( @@ -872,9 +878,10 @@ where for (num, rectangle) in rectangles.iter().enumerate() { let num = num as u32 + first_rectangle; - self.current_state.discard_rectangle.insert(num, *rectangle); + self.builder_state.discard_rectangle.insert(num, *rectangle); } + self.next_command_index += 1; self } @@ -942,8 +949,9 @@ where (fns.ext_extended_dynamic_state.cmd_set_front_face_ext)(self.handle(), face.into()); } - self.current_state.front_face = Some(face); + self.builder_state.front_face = Some(face); + self.next_command_index += 1; self } @@ -1004,8 +1012,9 @@ where let fns = self.device().fns(); (fns.ext_line_rasterization.cmd_set_line_stipple_ext)(self.handle(), factor, pattern); - self.current_state.line_stipple = Some(LineStipple { factor, pattern }); + self.builder_state.line_stipple = Some(LineStipple { factor, pattern }); + self.next_command_index += 1; self } @@ -1055,8 +1064,9 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_set_line_width)(self.handle(), line_width); - self.current_state.line_width = Some(line_width); + self.builder_state.line_width = Some(line_width); + self.next_command_index += 1; self } @@ -1125,8 +1135,9 @@ where let fns = self.device().fns(); (fns.ext_extended_dynamic_state2.cmd_set_logic_op_ext)(self.handle(), logic_op.into()); - 
self.current_state.logic_op = Some(logic_op); + self.builder_state.logic_op = Some(logic_op); + self.next_command_index += 1; self } @@ -1213,8 +1224,9 @@ where (fns.ext_extended_dynamic_state2 .cmd_set_patch_control_points_ext)(self.handle(), num); - self.current_state.patch_control_points = Some(num); + self.builder_state.patch_control_points = Some(num); + self.next_command_index += 1; self } @@ -1283,8 +1295,9 @@ where .cmd_set_primitive_restart_enable_ext)(self.handle(), enable.into()); } - self.current_state.primitive_restart_enable = Some(enable); + self.builder_state.primitive_restart_enable = Some(enable); + self.next_command_index += 1; self } @@ -1409,8 +1422,9 @@ where .cmd_set_primitive_topology_ext)(self.handle(), topology.into()); } - self.current_state.primitive_topology = Some(topology); + self.builder_state.primitive_topology = Some(topology); + self.next_command_index += 1; self } @@ -1479,8 +1493,9 @@ where .cmd_set_rasterizer_discard_enable_ext)(self.handle(), enable.into()); } - self.current_state.rasterizer_discard_enable = Some(enable); + self.builder_state.rasterizer_discard_enable = Some(enable); + self.next_command_index += 1; self } @@ -1588,9 +1603,10 @@ where for (num, scissor) in scissors.iter().enumerate() { let num = num as u32 + first_scissor; - self.current_state.scissor.insert(num, *scissor); + self.builder_state.scissor.insert(num, *scissor); } + self.next_command_index += 1; self } @@ -1709,8 +1725,9 @@ where ); } - self.current_state.scissor_with_count = Some(scissors); + self.builder_state.scissor_with_count = Some(scissors); + self.next_command_index += 1; self } @@ -1766,13 +1783,14 @@ where let faces = ash::vk::StencilFaceFlags::from(faces); if faces.intersects(ash::vk::StencilFaceFlags::FRONT) { - self.current_state.stencil_compare_mask.front = Some(compare_mask); + self.builder_state.stencil_compare_mask.front = Some(compare_mask); } if faces.intersects(ash::vk::StencilFaceFlags::BACK) { - 
self.current_state.stencil_compare_mask.back = Some(compare_mask); + self.builder_state.stencil_compare_mask.back = Some(compare_mask); } + self.next_command_index += 1; self } @@ -1891,7 +1909,7 @@ where let faces = ash::vk::StencilFaceFlags::from(faces); if faces.intersects(ash::vk::StencilFaceFlags::FRONT) { - self.current_state.stencil_op.front = Some(StencilOps { + self.builder_state.stencil_op.front = Some(StencilOps { fail_op, pass_op, depth_fail_op, @@ -1900,7 +1918,7 @@ where } if faces.intersects(ash::vk::StencilFaceFlags::BACK) { - self.current_state.stencil_op.back = Some(StencilOps { + self.builder_state.stencil_op.back = Some(StencilOps { fail_op, pass_op, depth_fail_op, @@ -1908,6 +1926,7 @@ where }); } + self.next_command_index += 1; self } @@ -1959,13 +1978,14 @@ where let faces = ash::vk::StencilFaceFlags::from(faces); if faces.intersects(ash::vk::StencilFaceFlags::FRONT) { - self.current_state.stencil_reference.front = Some(reference); + self.builder_state.stencil_reference.front = Some(reference); } if faces.intersects(ash::vk::StencilFaceFlags::BACK) { - self.current_state.stencil_reference.back = Some(reference); + self.builder_state.stencil_reference.back = Some(reference); } + self.next_command_index += 1; self } @@ -2031,8 +2051,9 @@ where .cmd_set_stencil_test_enable_ext)(self.handle(), enable.into()); } - self.current_state.stencil_test_enable = Some(enable); + self.builder_state.stencil_test_enable = Some(enable); + self.next_command_index += 1; self } @@ -2084,13 +2105,14 @@ where let faces = ash::vk::StencilFaceFlags::from(faces); if faces.intersects(ash::vk::StencilFaceFlags::FRONT) { - self.current_state.stencil_write_mask.front = Some(write_mask); + self.builder_state.stencil_write_mask.front = Some(write_mask); } if faces.intersects(ash::vk::StencilFaceFlags::BACK) { - self.current_state.stencil_write_mask.back = Some(write_mask); + self.builder_state.stencil_write_mask.back = Some(write_mask); } + self.next_command_index += 1; 
self } @@ -2198,9 +2220,10 @@ where for (num, viewport) in viewports.iter().enumerate() { let num = num as u32 + first_viewport; - self.current_state.viewport.insert(num, viewport.clone()); + self.builder_state.viewport.insert(num, viewport.clone()); } + self.next_command_index += 1; self } @@ -2319,8 +2342,9 @@ where ); } - self.current_state.viewport_with_count = Some(viewports); + self.builder_state.viewport_with_count = Some(viewports); + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/mod.rs b/vulkano/src/command_buffer/standard/builder/mod.rs index 1ac47cc3..233f4e6e 100644 --- a/vulkano/src/command_buffer/standard/builder/mod.rs +++ b/vulkano/src/command_buffer/standard/builder/mod.rs @@ -21,7 +21,7 @@ pub use crate::command_buffer::{ RenderingAttachmentInfo, RenderingAttachmentResolveInfo, RenderingInfo, ResolveImageInfo, }; use crate::{ - buffer::BufferAccess, + buffer::{sys::Buffer, BufferAccess}, command_buffer::{ allocator::{ CommandBufferAllocator, CommandBufferBuilderAlloc, StandardCommandBufferAllocator, @@ -30,12 +30,12 @@ use crate::{ BuildError, CommandBufferBeginError, CommandBufferInheritanceInfo, CommandBufferInheritanceRenderPassInfo, CommandBufferInheritanceRenderPassType, CommandBufferInheritanceRenderingInfo, CommandBufferLevel, CommandBufferUsage, - SubpassContents, + ResourceInCommand, ResourceUseRef, SubpassContents, }, descriptor_set::{DescriptorSetResources, DescriptorSetWithOffsets}, - device::{Device, DeviceOwned, QueueFamilyProperties}, + device::{Device, DeviceOwned, QueueFamilyProperties, QueueFlags}, format::{Format, FormatFeatures}, - image::ImageAspects, + image::{sys::Image, ImageAspects, ImageLayout, ImageSubresourceRange}, pipeline::{ graphics::{ color_blend::LogicOp, @@ -47,17 +47,23 @@ use crate::{ ComputePipeline, DynamicState, GraphicsPipeline, PipelineBindPoint, PipelineLayout, }, query::{QueryControlFlags, QueryType}, + range_map::RangeMap, range_set::RangeSet, 
render_pass::{Framebuffer, Subpass}, - OomError, RequiresOneOf, VulkanError, VulkanObject, + sync::{ + BufferMemoryBarrier, DependencyInfo, ImageMemoryBarrier, PipelineStage, + PipelineStageAccess, PipelineStageAccessSet, PipelineStages, + }, + DeviceSize, OomError, RequiresOneOf, VulkanError, VulkanObject, }; +use ahash::HashMap; use parking_lot::Mutex; use smallvec::SmallVec; use std::{ any::Any, - collections::{hash_map::Entry, HashMap}, + collections::hash_map::Entry, marker::PhantomData, - ops::RangeInclusive, + ops::{Range, RangeInclusive}, ptr, sync::{atomic::AtomicBool, Arc}, }; @@ -83,8 +89,10 @@ where queue_family_index: u32, usage: CommandBufferUsage, + next_command_index: usize, resources: Vec>, - current_state: CurrentState, + builder_state: CommandBufferBuilderState, + resources_usage_state: ResourcesState, _data: PhantomData, } @@ -542,7 +550,7 @@ where .map_err(VulkanError::from)?; } - let mut current_state: CurrentState = Default::default(); + let mut builder_state: CommandBufferBuilderState = Default::default(); if let Some(inheritance_info) = &inheritance_info { let &CommandBufferInheritanceInfo { @@ -553,7 +561,7 @@ where } = inheritance_info; if let Some(render_pass) = render_pass { - current_state.render_pass = Some(RenderPassState::from_inheritance(render_pass)); + builder_state.render_pass = Some(RenderPassState::from_inheritance(render_pass)); } } @@ -563,8 +571,10 @@ where queue_family_index, usage, + next_command_index: 0, resources: Vec::new(), - current_state, + builder_state, + resources_usage_state: Default::default(), _data: PhantomData, }) @@ -581,11 +591,11 @@ where { /// Builds the command buffer. 
pub fn build(self) -> Result, BuildError> { - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(BuildError::RenderPassActive); } - if !self.current_state.queries.is_empty() { + if !self.builder_state.queries.is_empty() { return Err(BuildError::QueryActive); } @@ -615,7 +625,7 @@ where { /// Builds the command buffer. pub fn build(self) -> Result, BuildError> { - if !self.current_state.queries.is_empty() { + if !self.builder_state.queries.is_empty() { return Err(BuildError::QueryActive); } @@ -653,7 +663,7 @@ where /// Holds the current binding and setting state. #[derive(Default)] -struct CurrentState { +struct CommandBufferBuilderState { // Render pass render_pass: Option, @@ -700,7 +710,7 @@ struct CurrentState { queries: HashMap, } -impl CurrentState { +impl CommandBufferBuilderState { fn reset_dynamic_states(&mut self, states: impl IntoIterator) { for state in states { match state { @@ -962,3 +972,433 @@ struct QueryState { flags: QueryControlFlags, in_subpass: bool, } + +#[derive(Debug, Default)] +struct ResourcesState { + buffers: HashMap, RangeMap>, + images: HashMap, RangeMap>, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +struct BufferRangeState { + resource_uses: Vec, + memory_access: MemoryAccessState, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +struct ImageRangeState { + resource_uses: Vec, + memory_access: MemoryAccessState, + expected_layout: ImageLayout, + current_layout: ImageLayout, +} + +impl ResourcesState { + fn record_buffer_access( + &mut self, + use_ref: &ResourceUseRef, + buffer: &Arc, + range: Range, + stage_access: PipelineStageAccess, + ) { + let range_map = self.buffers.entry(buffer.clone()).or_insert_with(|| { + [(0..buffer.size(), Default::default())] + .into_iter() + .collect() + }); + range_map.split_at(&range.start); + range_map.split_at(&range.end); + + for (_range, state) in range_map.range_mut(&range) { + state.resource_uses.push(*use_ref); + 
state.memory_access.record_access(use_ref, stage_access); + } + } + + fn record_image_access( + &mut self, + use_ref: &ResourceUseRef, + image: &Arc, + subresource_range: ImageSubresourceRange, + stage_access: PipelineStageAccess, + image_layout: ImageLayout, + ) { + let range_map = self.images.entry(image.clone()).or_insert_with(|| { + [(0..image.range_size(), Default::default())] + .into_iter() + .collect() + }); + + for range in image.iter_ranges(subresource_range) { + range_map.split_at(&range.start); + range_map.split_at(&range.end); + + for (_range, state) in range_map.range_mut(&range) { + if state.resource_uses.is_empty() { + state.expected_layout = image_layout; + } + + state.resource_uses.push(*use_ref); + state.memory_access.record_access(use_ref, stage_access); + } + } + } + + fn record_pipeline_barrier( + &mut self, + command_index: usize, + command_name: &'static str, + dependency_info: &DependencyInfo, + queue_flags: QueueFlags, + ) { + for barrier in &dependency_info.buffer_memory_barriers { + let barrier_scopes = BarrierScopes::from_buffer_memory_barrier(barrier, queue_flags); + let &BufferMemoryBarrier { + src_stages: _, + src_access: _, + dst_stages: _, + dst_access: _, + queue_family_ownership_transfer: _, + ref buffer, + ref range, + _ne: _, + } = barrier; + + let range_map = self.buffers.entry(buffer.clone()).or_insert_with(|| { + [(0..buffer.size(), Default::default())] + .into_iter() + .collect() + }); + range_map.split_at(&range.start); + range_map.split_at(&range.end); + + for (_range, state) in range_map.range_mut(range) { + state.memory_access.record_barrier(&barrier_scopes, None); + } + } + + for (index, barrier) in dependency_info.image_memory_barriers.iter().enumerate() { + let index = index as u32; + let barrier_scopes = BarrierScopes::from_image_memory_barrier(barrier, queue_flags); + let &ImageMemoryBarrier { + src_stages: _, + src_access: _, + dst_stages: _, + dst_access: _, + old_layout, + new_layout, + 
queue_family_ownership_transfer: _, + ref image, + ref subresource_range, + _ne, + } = barrier; + + // This is only used if there is a layout transition. + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::ImageMemoryBarrier { index }, + secondary_use_ref: None, + }; + let layout_transition = (old_layout != new_layout).then_some(&use_ref); + + let range_map = self.images.entry(image.clone()).or_insert_with(|| { + [(0..image.range_size(), Default::default())] + .into_iter() + .collect() + }); + + for range in image.iter_ranges(subresource_range.clone()) { + range_map.split_at(&range.start); + range_map.split_at(&range.end); + + for (_range, state) in range_map.range_mut(&range) { + if old_layout != new_layout { + if state.resource_uses.is_empty() { + state.expected_layout = old_layout; + } + + state.resource_uses.push(ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::ImageMemoryBarrier { index }, + secondary_use_ref: None, + }); + state.current_layout = new_layout; + } + + state + .memory_access + .record_barrier(&barrier_scopes, layout_transition); + } + } + } + + for barrier in &dependency_info.buffer_memory_barriers { + let &BufferMemoryBarrier { + ref buffer, + ref range, + .. + } = barrier; + + let range_map = self.buffers.get_mut(buffer).unwrap(); + for (_range, state) in range_map.range_mut(range) { + state.memory_access.apply_pending(); + } + } + + for barrier in &dependency_info.image_memory_barriers { + let &ImageMemoryBarrier { + ref image, + ref subresource_range, + .. 
+ } = barrier; + + let range_map = self.images.get_mut(image).unwrap(); + for range in image.iter_ranges(subresource_range.clone()) { + for (_range, state) in range_map.range_mut(&range) { + state.memory_access.apply_pending(); + } + } + } + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +struct MemoryAccessState { + mutable: bool, + last_write: Option, + reads_since_last_write: HashMap, + + /// Pending changes that have not yet been applied. This is used during barrier recording. + pending: Option, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +struct WriteState { + use_ref: ResourceUseRef, + access: PipelineStageAccess, + + /// The `dst_stages` and `dst_access` of all barriers that protect against this write. + barriers_since: PipelineStageAccessSet, + + /// The `dst_stages` of all barriers that form a dependency chain with this write. + dependency_chain: PipelineStages, + + /// The union of all `barriers_since` of all `reads_since_last_write`. + read_barriers_since: PipelineStages, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +struct PendingWriteState { + /// If this is `Some`, then the barrier is treated as a new write, + /// and the previous `last_write` is discarded. + /// Otherwise, the values below are added to the existing `last_write`. + layout_transition: Option, + + barriers_since: PipelineStageAccessSet, + dependency_chain: PipelineStages, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +struct ReadState { + use_ref: ResourceUseRef, + access: PipelineStageAccess, + + /// The `dst_stages` of all barriers that protect against this read. + /// This always includes the stage of `self`. + barriers_since: PipelineStages, + + /// Stages of reads recorded after this read, + /// that were in scope of `barriers_since` at the time of recording. + /// This always includes the stage of `self`. + barriered_reads_since: PipelineStages, + + /// Pending changes that have not yet been applied. This is used during barrier recording. 
+ pending: Option, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +struct PendingReadState { + barriers_since: PipelineStages, +} + +impl MemoryAccessState { + fn record_access(&mut self, use_ref: &ResourceUseRef, access: PipelineStageAccess) { + if access.is_write() { + self.mutable = true; + self.last_write = Some(WriteState { + use_ref: *use_ref, + access, + barriers_since: Default::default(), + dependency_chain: Default::default(), + read_barriers_since: Default::default(), + }); + self.reads_since_last_write.clear(); + } else { + let pipeline_stage = PipelineStage::try_from(access).unwrap(); + let pipeline_stages = PipelineStages::from(pipeline_stage); + + for read_state in self.reads_since_last_write.values_mut() { + if read_state.barriers_since.intersects(pipeline_stages) { + read_state.barriered_reads_since |= pipeline_stages; + } else { + read_state.barriered_reads_since -= pipeline_stages; + } + } + + self.reads_since_last_write.insert( + pipeline_stage, + ReadState { + use_ref: *use_ref, + access, + barriers_since: pipeline_stages, + barriered_reads_since: pipeline_stages, + pending: None, + }, + ); + } + } + + fn record_barrier( + &mut self, + barrier_scopes: &BarrierScopes, + layout_transition: Option<&ResourceUseRef>, + ) { + let skip_reads = if let Some(use_ref) = layout_transition { + let pending = self.pending.get_or_insert_with(Default::default); + pending.layout_transition = Some(*use_ref); + true + } else { + self.pending + .map_or(false, |pending| pending.layout_transition.is_some()) + }; + + // If the last write is in the src scope of the barrier, then add the dst scopes. + // If the barrier includes a layout transition, then that layout transition is + // considered the last write, and it is always in the src scope of the barrier. 
+ if layout_transition.is_some() + || self.last_write.as_ref().map_or(false, |write_state| { + barrier_scopes + .src_access_scope + .contains_enum(write_state.access) + || barrier_scopes + .src_exec_scope + .intersects(write_state.dependency_chain) + }) + { + let pending = self.pending.get_or_insert_with(Default::default); + pending.barriers_since |= barrier_scopes.dst_access_scope; + pending.dependency_chain |= barrier_scopes.dst_exec_scope; + } + + // A layout transition counts as a write, which means that `reads_since_last_write` will + // be cleared when applying pending operations. + // Therefore, there is no need to update the reads. + if !skip_reads { + // Gather all reads for which `barriers_since` is in the barrier's `src_exec_scope`. + let reads_in_src_exec_scope = self.reads_since_last_write.iter().fold( + PipelineStages::empty(), + |total, (&stage, read_state)| { + if barrier_scopes + .src_exec_scope + .intersects(read_state.barriers_since) + { + total.union(stage.into()) + } else { + total + } + }, + ); + + for read_state in self.reads_since_last_write.values_mut() { + if reads_in_src_exec_scope.intersects(read_state.barriered_reads_since) { + let pending = read_state.pending.get_or_insert_with(Default::default); + pending.barriers_since |= barrier_scopes.dst_exec_scope; + } + } + } + } + + fn apply_pending(&mut self) { + if let Some(PendingWriteState { + layout_transition, + barriers_since, + dependency_chain, + }) = self.pending.take() + { + // If there is a pending layout transition, it is treated as the new `last_write`. 
+ if let Some(use_ref) = layout_transition { + self.mutable = true; + self.last_write = Some(WriteState { + use_ref, + access: PipelineStageAccess::ImageLayoutTransition, + barriers_since, + dependency_chain, + read_barriers_since: Default::default(), + }); + self.reads_since_last_write.clear(); + } else if let Some(write_state) = &mut self.last_write { + write_state.barriers_since |= barriers_since; + write_state.dependency_chain |= dependency_chain; + } + } + + for read_state in self.reads_since_last_write.values_mut() { + if let Some(PendingReadState { barriers_since }) = read_state.pending.take() { + read_state.barriers_since |= barriers_since; + + if let Some(write_state) = &mut self.last_write { + write_state.read_barriers_since |= read_state.barriers_since; + } + } + } + } +} + +struct BarrierScopes { + src_exec_scope: PipelineStages, + src_access_scope: PipelineStageAccessSet, + dst_exec_scope: PipelineStages, + dst_access_scope: PipelineStageAccessSet, +} + +impl BarrierScopes { + fn from_buffer_memory_barrier(barrier: &BufferMemoryBarrier, queue_flags: QueueFlags) -> Self { + let src_stages_expanded = barrier.src_stages.expand(queue_flags); + let src_exec_scope = src_stages_expanded.with_earlier(); + let src_access_scope = PipelineStageAccessSet::from(barrier.src_access) + & PipelineStageAccessSet::from(src_stages_expanded); + + let dst_stages_expanded = barrier.dst_stages.expand(queue_flags); + let dst_exec_scope = dst_stages_expanded.with_later(); + let dst_access_scope = PipelineStageAccessSet::from(barrier.dst_access) + & PipelineStageAccessSet::from(dst_stages_expanded); + + Self { + src_exec_scope, + src_access_scope, + dst_exec_scope, + dst_access_scope, + } + } + + fn from_image_memory_barrier(barrier: &ImageMemoryBarrier, queue_flags: QueueFlags) -> Self { + let src_stages_expanded = barrier.src_stages.expand(queue_flags); + let src_exec_scope = src_stages_expanded.with_earlier(); + let src_access_scope = 
PipelineStageAccessSet::from(barrier.src_access) + & PipelineStageAccessSet::from(src_stages_expanded); + + let dst_stages_expanded = barrier.dst_stages.expand(queue_flags); + let dst_exec_scope = dst_stages_expanded.with_later(); + let dst_access_scope = PipelineStageAccessSet::from(barrier.dst_access) + & PipelineStageAccessSet::from(dst_stages_expanded); + + Self { + src_exec_scope, + src_access_scope, + dst_exec_scope, + dst_access_scope, + } + } +} diff --git a/vulkano/src/command_buffer/standard/builder/pipeline.rs b/vulkano/src/command_buffer/standard/builder/pipeline.rs index 0d5d1c0f..607d72c3 100644 --- a/vulkano/src/command_buffer/standard/builder/pipeline.rs +++ b/vulkano/src/command_buffer/standard/builder/pipeline.rs @@ -7,28 +7,36 @@ // notice may not be copied, modified, or distributed except // according to those terms. -use super::{CommandBufferBuilder, PipelineExecutionError, RenderPassState, RenderPassStateType}; +use super::{ + CommandBufferBuilder, DescriptorSetState, PipelineExecutionError, RenderPassState, + RenderPassStateType, ResourcesState, +}; use crate::{ buffer::{view::BufferViewAbstract, BufferAccess, BufferUsage, TypedBufferAccess}, command_buffer::{ allocator::CommandBufferAllocator, commands::pipeline::DescriptorResourceInvalidError, - DispatchIndirectCommand, DrawIndexedIndirectCommand, DrawIndirectCommand, SubpassContents, + DispatchIndirectCommand, DrawIndexedIndirectCommand, DrawIndirectCommand, + ResourceInCommand, ResourceUseRef, SubpassContents, }, descriptor_set::{layout::DescriptorType, DescriptorBindingResources}, device::{DeviceOwned, QueueFlags}, format::FormatFeatures, - image::{ImageAspects, ImageViewAbstract, SampleCount}, + image::{ImageAccess, ImageAspects, ImageViewAbstract, SampleCount}, pipeline::{ graphics::{ - input_assembly::PrimitiveTopology, render_pass::PipelineRenderPassType, + input_assembly::{IndexType, PrimitiveTopology}, + render_pass::PipelineRenderPassType, vertex_input::VertexInputRate, }, - 
DynamicState, GraphicsPipeline, PartialStateMode, Pipeline, PipelineLayout, + DynamicState, GraphicsPipeline, PartialStateMode, Pipeline, PipelineBindPoint, + PipelineLayout, }, sampler::Sampler, - shader::{DescriptorBindingRequirements, ShaderScalarType, ShaderStage}, + shader::{DescriptorBindingRequirements, ShaderScalarType, ShaderStage, ShaderStages}, + sync::PipelineStageAccess, RequiresOneOf, VulkanObject, }; +use ahash::HashMap; use std::{cmp::min, mem::size_of, sync::Arc}; impl CommandBufferBuilder @@ -69,13 +77,13 @@ where } // VUID-vkCmdDispatch-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(PipelineExecutionError::ForbiddenInsideRenderPass); } // VUID-vkCmdDispatch-None-02700 let pipeline = self - .current_state + .builder_state .pipeline_compute .as_ref() .ok_or(PipelineExecutionError::PipelineNotBound)? @@ -115,8 +123,23 @@ where group_counts[2], ); - // TODO: sync state update + let command_index = self.next_command_index; + let command_name = "dispatch"; + let pipeline = self + .builder_state + .pipeline_compute + .as_ref() + .unwrap() + .as_ref(); + record_descriptor_sets_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.descriptor_sets, + pipeline, + ); + self.next_command_index += 1; self } @@ -158,13 +181,13 @@ where } // VUID-vkCmdDispatchIndirect-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(PipelineExecutionError::ForbiddenInsideRenderPass); } // VUID-vkCmdDispatchIndirect-None-02700 let pipeline = self - .current_state + .builder_state .pipeline_compute .as_ref() .ok_or(PipelineExecutionError::PipelineNotBound)? 
@@ -193,10 +216,31 @@ where indirect_buffer_inner.offset, ); + let command_index = self.next_command_index; + let command_name = "dispatch_indirect"; + let pipeline = self + .builder_state + .pipeline_compute + .as_ref() + .unwrap() + .as_ref(); + record_descriptor_sets_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.descriptor_sets, + pipeline, + ); + record_indirect_buffer_access( + &mut self.resources_usage_state, + command_index, + command_name, + &indirect_buffer, + ); + self.resources.push(Box::new(indirect_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -242,14 +286,14 @@ where ) -> Result<(), PipelineExecutionError> { // VUID-vkCmdDraw-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(PipelineExecutionError::ForbiddenOutsideRenderPass)?; // VUID-vkCmdDraw-None-02700 let pipeline = self - .current_state + .builder_state .pipeline_graphics .as_ref() .ok_or(PipelineExecutionError::PipelineNotBound)? 
@@ -287,14 +331,36 @@ where first_instance, ); + let command_index = self.next_command_index; + let command_name = "draw"; + let pipeline = self + .builder_state + .pipeline_graphics + .as_ref() + .unwrap() + .as_ref(); + record_descriptor_sets_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.descriptor_sets, + pipeline, + ); + record_vertex_buffers_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.vertex_buffers, + pipeline, + ); + if let RenderPassStateType::BeginRendering(state) = - &mut self.current_state.render_pass.as_mut().unwrap().render_pass + &mut self.builder_state.render_pass.as_mut().unwrap().render_pass { state.pipeline_used = true; } - // TODO: sync state update - + self.next_command_index += 1; self } @@ -340,14 +406,14 @@ where ) -> Result<(), PipelineExecutionError> { // VUID-vkCmdDrawIndirect-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(PipelineExecutionError::ForbiddenOutsideRenderPass)?; // VUID-vkCmdDrawIndirect-None-02700 let pipeline = self - .current_state + .builder_state .pipeline_graphics .as_ref() .ok_or(PipelineExecutionError::PipelineNotBound)? 
@@ -409,16 +475,44 @@ where stride, ); + let command_index = self.next_command_index; + let command_name = "draw_indirect"; + let pipeline = self + .builder_state + .pipeline_graphics + .as_ref() + .unwrap() + .as_ref(); + record_descriptor_sets_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.descriptor_sets, + pipeline, + ); + record_vertex_buffers_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.vertex_buffers, + pipeline, + ); + record_indirect_buffer_access( + &mut self.resources_usage_state, + command_index, + command_name, + &indirect_buffer, + ); + if let RenderPassStateType::BeginRendering(state) = - &mut self.current_state.render_pass.as_mut().unwrap().render_pass + &mut self.builder_state.render_pass.as_mut().unwrap().render_pass { state.pipeline_used = true; } self.resources.push(Box::new(indirect_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -487,14 +581,14 @@ where // VUID-vkCmdDrawIndexed-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(PipelineExecutionError::ForbiddenOutsideRenderPass)?; // VUID-vkCmdDrawIndexed-None-02700 let pipeline = self - .current_state + .builder_state .pipeline_graphics .as_ref() .ok_or(PipelineExecutionError::PipelineNotBound)? 
@@ -536,14 +630,42 @@ where first_instance, ); + let command_index = self.next_command_index; + let command_name = "draw_indexed"; + let pipeline = self + .builder_state + .pipeline_graphics + .as_ref() + .unwrap() + .as_ref(); + record_descriptor_sets_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.descriptor_sets, + pipeline, + ); + record_vertex_buffers_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.vertex_buffers, + pipeline, + ); + record_index_buffer_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.index_buffer, + ); + if let RenderPassStateType::BeginRendering(state) = - &mut self.current_state.render_pass.as_mut().unwrap().render_pass + &mut self.builder_state.render_pass.as_mut().unwrap().render_pass { state.pipeline_used = true; } - // TODO: sync state update - + self.next_command_index += 1; self } @@ -596,14 +718,14 @@ where ) -> Result<(), PipelineExecutionError> { // VUID-vkCmdDrawIndexedIndirect-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(PipelineExecutionError::ForbiddenOutsideRenderPass)?; // VUID-vkCmdDrawIndexedIndirect-None-02700 let pipeline = self - .current_state + .builder_state .pipeline_graphics .as_ref() .ok_or(PipelineExecutionError::PipelineNotBound)? 
@@ -666,16 +788,50 @@ where stride, ); + let command_index = self.next_command_index; + let command_name = "draw_indexed_indirect"; + let pipeline = self + .builder_state + .pipeline_graphics + .as_ref() + .unwrap() + .as_ref(); + record_descriptor_sets_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.descriptor_sets, + pipeline, + ); + record_vertex_buffers_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.vertex_buffers, + pipeline, + ); + record_index_buffer_access( + &mut self.resources_usage_state, + command_index, + command_name, + &self.builder_state.index_buffer, + ); + record_indirect_buffer_access( + &mut self.resources_usage_state, + command_index, + command_name, + &indirect_buffer, + ); + if let RenderPassStateType::BeginRendering(state) = - &mut self.current_state.render_pass.as_mut().unwrap().render_pass + &mut self.builder_state.render_pass.as_mut().unwrap().render_pass { state.pipeline_used = true; } self.resources.push(Box::new(indirect_buffer)); - // TODO: sync state update - + self.next_command_index += 1; self } @@ -685,7 +841,7 @@ where ) -> Result<(), PipelineExecutionError> { // VUID? 
let (index_buffer, index_type) = self - .current_state + .builder_state .index_buffer .as_ref() .ok_or(PipelineExecutionError::IndexBufferNotBound)?; @@ -787,7 +943,7 @@ where // VUID-vkCmdDispatch-None-02697 let descriptor_set_state = self - .current_state + .builder_state .descriptor_sets .get(&pipeline.bind_point()) .ok_or(PipelineExecutionError::PipelineLayoutNotCompatible)?; @@ -1137,7 +1293,7 @@ where // VUID-vkCmdDispatch-maintenance4-06425 let constants_pipeline_layout = self - .current_state + .builder_state .push_constants_pipeline_layout .as_ref() .ok_or(PipelineExecutionError::PushConstantsMissing)?; @@ -1150,7 +1306,7 @@ where return Err(PipelineExecutionError::PushConstantsNotCompatible); } - let set_bytes = &self.current_state.push_constants; + let set_bytes = &self.builder_state.push_constants; // VUID-vkCmdDispatch-maintenance4-06425 if !pipeline_layout @@ -1179,13 +1335,13 @@ where match dynamic_state { DynamicState::BlendConstants => { // VUID? - if self.current_state.blend_constants.is_none() { + if self.builder_state.blend_constants.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::ColorWriteEnable => { // VUID-vkCmdDraw-attachmentCount-06667 - let enables = self.current_state.color_write_enable.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?; + let enables = self.builder_state.color_write_enable.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?; // VUID-vkCmdDraw-attachmentCount-06667 if enables.len() < pipeline.color_blend_state().unwrap().attachments.len() { @@ -1203,49 +1359,49 @@ where } DynamicState::CullMode => { // VUID? - if self.current_state.cull_mode.is_none() { + if self.builder_state.cull_mode.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthBias => { // VUID? 
- if self.current_state.depth_bias.is_none() { + if self.builder_state.depth_bias.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthBiasEnable => { // VUID-vkCmdDraw-None-04877 - if self.current_state.depth_bias_enable.is_none() { + if self.builder_state.depth_bias_enable.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthBounds => { // VUID? - if self.current_state.depth_bounds.is_none() { + if self.builder_state.depth_bounds.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthBoundsTestEnable => { // VUID? - if self.current_state.depth_bounds_test_enable.is_none() { + if self.builder_state.depth_bounds_test_enable.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthCompareOp => { // VUID? - if self.current_state.depth_compare_op.is_none() { + if self.builder_state.depth_compare_op.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthTestEnable => { // VUID? - if self.current_state.depth_test_enable.is_none() { + if self.builder_state.depth_test_enable.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::DepthWriteEnable => { // VUID? - if self.current_state.depth_write_enable.is_none() { + if self.builder_state.depth_write_enable.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } @@ -1260,7 +1416,7 @@ where for num in 0..discard_rectangle_count { // VUID? - if !self.current_state.discard_rectangle.contains_key(&num) { + if !self.builder_state.discard_rectangle.contains_key(&num) { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } @@ -1269,38 +1425,38 @@ where DynamicState::FragmentShadingRate => todo!(), DynamicState::FrontFace => { // VUID? 
- if self.current_state.front_face.is_none() { + if self.builder_state.front_face.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::LineStipple => { // VUID? - if self.current_state.line_stipple.is_none() { + if self.builder_state.line_stipple.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::LineWidth => { // VUID? - if self.current_state.line_width.is_none() { + if self.builder_state.line_width.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::LogicOp => { // VUID-vkCmdDraw-logicOp-04878 - if self.current_state.logic_op.is_none() { + if self.builder_state.logic_op.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::PatchControlPoints => { // VUID-vkCmdDraw-None-04875 - if self.current_state.patch_control_points.is_none() { + if self.builder_state.patch_control_points.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } DynamicState::PrimitiveRestartEnable => { // VUID-vkCmdDraw-None-04879 let primitive_restart_enable = - if let Some(enable) = self.current_state.primitive_restart_enable { + if let Some(enable) = self.builder_state.primitive_restart_enable { enable } else { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); @@ -1310,7 +1466,7 @@ where let topology = match pipeline.input_assembly_state().topology { PartialStateMode::Fixed(topology) => topology, PartialStateMode::Dynamic(_) => { - if let Some(topology) = self.current_state.primitive_topology { + if let Some(topology) = self.builder_state.primitive_topology { topology } else { return Err(PipelineExecutionError::DynamicStateNotSet { @@ -1364,7 +1520,7 @@ where } DynamicState::PrimitiveTopology => { // VUID-vkCmdDraw-primitiveTopology-03420 - let topology = if let Some(topology) = self.current_state.primitive_topology { + let topology 
= if let Some(topology) = self.builder_state.primitive_topology { topology } else { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); @@ -1405,7 +1561,7 @@ where } DynamicState::RasterizerDiscardEnable => { // VUID-vkCmdDraw-None-04876 - if self.current_state.rasterizer_discard_enable.is_none() { + if self.builder_state.rasterizer_discard_enable.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } @@ -1416,7 +1572,7 @@ where DynamicState::Scissor => { for num in 0..pipeline.viewport_state().unwrap().count().unwrap() { // VUID? - if !self.current_state.scissor.contains_key(&num) { + if !self.builder_state.scissor.contains_key(&num) { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } @@ -1424,7 +1580,7 @@ where DynamicState::ScissorWithCount => { // VUID-vkCmdDraw-scissorCount-03418 // VUID-vkCmdDraw-viewportCount-03419 - let scissor_count = self.current_state.scissor_with_count.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?.len() as u32; + let scissor_count = self.builder_state.scissor_with_count.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?.len() as u32; // Check if the counts match, but only if the viewport count is fixed. // If the viewport count is also dynamic, then the @@ -1442,7 +1598,7 @@ where } } DynamicState::StencilCompareMask => { - let state = self.current_state.stencil_compare_mask; + let state = self.builder_state.stencil_compare_mask; // VUID? if state.front.is_none() || state.back.is_none() { @@ -1450,7 +1606,7 @@ where } } DynamicState::StencilOp => { - let state = self.current_state.stencil_op; + let state = self.builder_state.stencil_op; // VUID? if state.front.is_none() || state.back.is_none() { @@ -1458,7 +1614,7 @@ where } } DynamicState::StencilReference => { - let state = self.current_state.stencil_reference; + let state = self.builder_state.stencil_reference; // VUID? 
if state.front.is_none() || state.back.is_none() { @@ -1467,14 +1623,14 @@ where } DynamicState::StencilTestEnable => { // VUID? - if self.current_state.stencil_test_enable.is_none() { + if self.builder_state.stencil_test_enable.is_none() { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } // TODO: Check if the stencil buffer is writable } DynamicState::StencilWriteMask => { - let state = self.current_state.stencil_write_mask; + let state = self.builder_state.stencil_write_mask; // VUID? if state.front.is_none() || state.back.is_none() { @@ -1486,7 +1642,7 @@ where DynamicState::Viewport => { for num in 0..pipeline.viewport_state().unwrap().count().unwrap() { // VUID? - if !self.current_state.viewport.contains_key(&num) { + if !self.builder_state.viewport.contains_key(&num) { return Err(PipelineExecutionError::DynamicStateNotSet { dynamic_state }); } } @@ -1495,7 +1651,7 @@ where DynamicState::ViewportShadingRatePalette => todo!(), DynamicState::ViewportWithCount => { // VUID-vkCmdDraw-viewportCount-03417 - let viewport_count = self.current_state.viewport_with_count.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?.len() as u32; + let viewport_count = self.builder_state.viewport_with_count.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?.len() as u32; let scissor_count = if let Some(scissor_count) = pipeline.viewport_state().unwrap().count() @@ -1505,7 +1661,7 @@ where } else { // VUID-vkCmdDraw-viewportCount-03419 // The scissor count is also dynamic. 
- self.current_state.scissor_with_count.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?.len() as u32 + self.builder_state.scissor_with_count.as_ref().ok_or(PipelineExecutionError::DynamicStateNotSet { dynamic_state })?.len() as u32 }; // VUID-vkCmdDraw-viewportCount-03417 @@ -1710,7 +1866,7 @@ where for (&binding_num, binding_desc) in &vertex_input.bindings { // VUID-vkCmdDraw-None-04007 - let vertex_buffer = match self.current_state.vertex_buffers.get(&binding_num) { + let vertex_buffer = match self.builder_state.vertex_buffers.get(&binding_num) { Some(x) => x, None => return Err(PipelineExecutionError::VertexBufferNotBound { binding_num }), }; @@ -1807,3 +1963,252 @@ where Ok(()) } } + +fn record_descriptor_sets_access( + resources_usage_state: &mut ResourcesState, + command_index: usize, + command_name: &'static str, + descriptor_sets_state: &HashMap, + pipeline: &impl Pipeline, +) { + let descriptor_sets_state = match descriptor_sets_state.get(&pipeline.bind_point()) { + Some(x) => x, + None => return, + }; + + for (&(set, binding), binding_reqs) in pipeline.descriptor_binding_requirements() { + let descriptor_type = descriptor_sets_state.pipeline_layout.set_layouts()[set as usize] + .bindings()[&binding] + .descriptor_type; + + // TODO: Should input attachments be handled here or in attachment access? 
+ if descriptor_type == DescriptorType::InputAttachment { + continue; + } + + let use_iter = move |index: u32| { + let (stages_read, stages_write) = [Some(index), None] + .into_iter() + .filter_map(|index| binding_reqs.descriptors.get(&index)) + .fold( + (ShaderStages::empty(), ShaderStages::empty()), + |(stages_read, stages_write), desc_reqs| { + ( + stages_read | desc_reqs.memory_read, + stages_write | desc_reqs.memory_write, + ) + }, + ); + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::DescriptorSet { + set, + binding, + index, + }, + secondary_use_ref: None, + }; + let stage_access_iter = PipelineStageAccess::iter_descriptor_stages( + descriptor_type, + stages_read, + stages_write, + ); + (use_ref, stage_access_iter) + }; + + match descriptor_sets_state.descriptor_sets[&set] + .resources() + .binding(binding) + .unwrap() + { + DescriptorBindingResources::None(_) => continue, + DescriptorBindingResources::Buffer(elements) => { + for (index, element) in elements.iter().enumerate() { + if let Some(buffer) = element { + let buffer_inner = buffer.inner(); + let (use_ref, stage_access_iter) = use_iter(index as u32); + + let mut range = 0..buffer.size(); // TODO: + range.start += buffer_inner.offset; + range.end += buffer_inner.offset; + + for stage_access in stage_access_iter { + resources_usage_state.record_buffer_access( + &use_ref, + buffer_inner.buffer, + range.clone(), + stage_access, + ); + } + } + } + } + DescriptorBindingResources::BufferView(elements) => { + for (index, element) in elements.iter().enumerate() { + if let Some(buffer_view) = element { + let buffer = buffer_view.buffer(); + let buffer_inner = buffer.inner(); + let (use_ref, stage_access_iter) = use_iter(index as u32); + + let mut range = buffer_view.range(); + range.start += buffer_inner.offset; + range.end += buffer_inner.offset; + + for stage_access in stage_access_iter { + resources_usage_state.record_buffer_access( + &use_ref, + 
buffer_inner.buffer, + range.clone(), + stage_access, + ); + } + } + } + } + DescriptorBindingResources::ImageView(elements) => { + for (index, element) in elements.iter().enumerate() { + if let Some(image_view) = element { + let image = image_view.image(); + let image_inner = image.inner(); + let layout = image + .descriptor_layouts() + .expect( + "descriptor_layouts must return Some when used in an image view", + ) + .layout_for(descriptor_type); + let (use_ref, stage_access_iter) = use_iter(index as u32); + + let mut subresource_range = image_view.subresource_range().clone(); + subresource_range.array_layers.start += image_inner.first_layer; + subresource_range.array_layers.end += image_inner.first_layer; + subresource_range.mip_levels.start += image_inner.first_mipmap_level; + subresource_range.mip_levels.end += image_inner.first_mipmap_level; + + for stage_access in stage_access_iter { + resources_usage_state.record_image_access( + &use_ref, + image_inner.image, + subresource_range.clone(), + stage_access, + layout, + ); + } + } + } + } + DescriptorBindingResources::ImageViewSampler(elements) => { + for (index, element) in elements.iter().enumerate() { + if let Some((image_view, _)) = element { + let image = image_view.image(); + let image_inner = image.inner(); + let layout = image + .descriptor_layouts() + .expect( + "descriptor_layouts must return Some when used in an image view", + ) + .layout_for(descriptor_type); + let (use_ref, stage_access_iter) = use_iter(index as u32); + + let mut subresource_range = image_view.subresource_range().clone(); + subresource_range.array_layers.start += image_inner.first_layer; + subresource_range.array_layers.end += image_inner.first_layer; + subresource_range.mip_levels.start += image_inner.first_mipmap_level; + subresource_range.mip_levels.end += image_inner.first_mipmap_level; + + for stage_access in stage_access_iter { + resources_usage_state.record_image_access( + &use_ref, + image_inner.image, + 
subresource_range.clone(), + stage_access, + layout, + ); + } + } + } + } + DescriptorBindingResources::Sampler(_) => (), + } + } +} + +fn record_vertex_buffers_access( + resources_usage_state: &mut ResourcesState, + command_index: usize, + command_name: &'static str, + vertex_buffers_state: &HashMap>, + pipeline: &GraphicsPipeline, +) { + for &binding in pipeline.vertex_input_state().bindings.keys() { + let buffer = &vertex_buffers_state[&binding]; + let buffer_inner = buffer.inner(); + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::VertexBuffer { binding }, + secondary_use_ref: None, + }; + + let mut range = 0..buffer.size(); // TODO: take range from draw command + range.start += buffer_inner.offset; + range.end += buffer_inner.offset; + resources_usage_state.record_buffer_access( + &use_ref, + buffer_inner.buffer, + range, + PipelineStageAccess::VertexAttributeInput_VertexAttributeRead, + ); + } +} + +fn record_index_buffer_access( + resources_usage_state: &mut ResourcesState, + command_index: usize, + command_name: &'static str, + index_buffer_state: &Option<(Arc, IndexType)>, +) { + let buffer = &index_buffer_state.as_ref().unwrap().0; + let buffer_inner = buffer.inner(); + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::IndexBuffer, + secondary_use_ref: None, + }; + + let mut range = 0..buffer.size(); // TODO: take range from draw command + range.start += buffer_inner.offset; + range.end += buffer_inner.offset; + resources_usage_state.record_buffer_access( + &use_ref, + buffer_inner.buffer, + range, + PipelineStageAccess::IndexInput_IndexRead, + ); +} + +fn record_indirect_buffer_access( + resources_usage_state: &mut ResourcesState, + command_index: usize, + command_name: &'static str, + buffer: &Arc, +) { + let buffer_inner = buffer.inner(); + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: 
ResourceInCommand::IndirectBuffer, + secondary_use_ref: None, + }; + + let mut range = 0..buffer.size(); // TODO: take range from draw command + range.start += buffer_inner.offset; + range.end += buffer_inner.offset; + resources_usage_state.record_buffer_access( + &use_ref, + buffer_inner.buffer, + range, + PipelineStageAccess::DrawIndirect_IndirectCommandRead, + ); +} diff --git a/vulkano/src/command_buffer/standard/builder/query.rs b/vulkano/src/command_buffer/standard/builder/query.rs index aad75d5d..086b97f0 100644 --- a/vulkano/src/command_buffer/standard/builder/query.rs +++ b/vulkano/src/command_buffer/standard/builder/query.rs @@ -9,11 +9,11 @@ use super::{CommandBufferBuilder, QueryError, QueryState}; use crate::{ - buffer::{BufferUsage, TypedBufferAccess}, - command_buffer::allocator::CommandBufferAllocator, + buffer::{BufferAccess, BufferUsage, TypedBufferAccess}, + command_buffer::{allocator::CommandBufferAllocator, ResourceInCommand, ResourceUseRef}, device::{DeviceOwned, QueueFlags}, query::{QueryControlFlags, QueryPool, QueryResultElement, QueryResultFlags, QueryType}, - sync::{PipelineStage, PipelineStages}, + sync::{PipelineStage, PipelineStageAccess, PipelineStages}, DeviceSize, RequiresOneOf, Version, VulkanObject, }; use std::{ops::Range, sync::Arc}; @@ -122,14 +122,14 @@ where // VUID-vkCmdBeginQuery-queryPool-01922 if self - .current_state + .builder_state .queries .contains_key(&query_pool.query_type().into()) { return Err(QueryError::QueryIsActive); } - if let Some(render_pass_state) = &self.current_state.render_pass { + if let Some(render_pass_state) = &self.builder_state.render_pass { // VUID-vkCmdBeginQuery-query-00808 if query + render_pass_state.view_mask.count_ones() > query_pool.query_count() { return Err(QueryError::OutOfRangeMultiview); @@ -154,19 +154,20 @@ where (fns.v1_0.cmd_begin_query)(self.handle(), query_pool.handle(), query, flags.into()); let ty = query_pool.query_type(); - self.current_state.queries.insert( + 
self.builder_state.queries.insert( ty.into(), QueryState { query_pool: query_pool.handle(), query, ty, flags, - in_subpass: self.current_state.render_pass.is_some(), + in_subpass: self.builder_state.render_pass.is_some(), }, ); self.resources.push(Box::new(query_pool)); + self.next_command_index += 1; self } @@ -200,7 +201,7 @@ where // VUID-vkCmdEndQuery-None-01923 if !self - .current_state + .builder_state .queries .get(&query_pool.query_type().into()) .map_or(false, |state| { @@ -213,7 +214,7 @@ where // VUID-vkCmdEndQuery-query-00810 query_pool.query(query).ok_or(QueryError::OutOfRange)?; - if let Some(render_pass_state) = &self.current_state.render_pass { + if let Some(render_pass_state) = &self.builder_state.render_pass { // VUID-vkCmdEndQuery-query-00812 if query + render_pass_state.view_mask.count_ones() > query_pool.query_count() { return Err(QueryError::OutOfRangeMultiview); @@ -232,12 +233,13 @@ where let fns = self.device().fns(); (fns.v1_0.cmd_end_query)(self.handle(), query_pool.handle(), query); - self.current_state + self.builder_state .queries .remove(&query_pool.query_type().into()); self.resources.push(Box::new(query_pool)); + self.next_command_index += 1; self } @@ -444,7 +446,7 @@ where // VUID-vkCmdWriteTimestamp2-query-04903 query_pool.query(query).ok_or(QueryError::OutOfRange)?; - if let Some(render_pass_state) = &self.current_state.render_pass { + if let Some(render_pass_state) = &self.builder_state.render_pass { // VUID-vkCmdWriteTimestamp2-query-03865 if query + render_pass_state.view_mask.count_ones() > query_pool.query_count() { return Err(QueryError::OutOfRangeMultiview); @@ -491,6 +493,7 @@ where self.resources.push(Box::new(query_pool)); + self.next_command_index += 1; self } @@ -514,7 +517,7 @@ where &mut self, query_pool: Arc, queries: Range, - destination: Arc, + dst_buffer: Arc, flags: QueryResultFlags, ) -> Result<&mut Self, QueryError> where @@ -524,7 +527,7 @@ where self.validate_copy_query_pool_results( &query_pool, 
queries.clone(), - destination.as_ref(), + dst_buffer.as_ref(), flags, )?; @@ -532,13 +535,8 @@ where let per_query_len = query_pool.query_type().result_len() + flags.intersects(QueryResultFlags::WITH_AVAILABILITY) as DeviceSize; let stride = per_query_len * std::mem::size_of::() as DeviceSize; - Ok(self.copy_query_pool_results_unchecked( - query_pool, - queries, - destination, - stride, - flags, - )) + Ok(self + .copy_query_pool_results_unchecked(query_pool, queries, dst_buffer, stride, flags)) } } @@ -546,7 +544,7 @@ where &self, query_pool: &QueryPool, queries: Range, - destination: &D, + dst_buffer: &D, flags: QueryResultFlags, ) -> Result<(), QueryError> where @@ -564,18 +562,18 @@ where } // VUID-vkCmdCopyQueryPoolResults-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(QueryError::ForbiddenInsideRenderPass); } let device = self.device(); - let buffer_inner = destination.inner(); + let buffer_inner = dst_buffer.inner(); // VUID-vkCmdCopyQueryPoolResults-commonparent assert_eq!(device, buffer_inner.buffer.device()); assert_eq!(device, query_pool.device()); - assert!(destination.len() > 0); + assert!(dst_buffer.len() > 0); // VUID-vkCmdCopyQueryPoolResults-flags-00822 // VUID-vkCmdCopyQueryPoolResults-flags-00823 @@ -593,10 +591,10 @@ where let required_len = per_query_len * count as DeviceSize; // VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824 - if destination.len() < required_len { + if dst_buffer.len() < required_len { return Err(QueryError::BufferTooSmall { required_len, - actual_len: destination.len(), + actual_len: dst_buffer.len(), }); } @@ -626,7 +624,7 @@ where &mut self, query_pool: Arc, queries: Range, - destination: Arc, + dst_buffer: Arc, stride: DeviceSize, flags: QueryResultFlags, ) -> &mut Self @@ -634,7 +632,7 @@ where D: TypedBufferAccess + 'static, T: QueryResultElement, { - let destination_inner = destination.inner(); + let dst_buffer_inner = dst_buffer.inner(); let fns = 
self.device().fns(); (fns.v1_0.cmd_copy_query_pool_results)( @@ -642,17 +640,35 @@ where query_pool.handle(), queries.start, queries.end - queries.start, - destination_inner.buffer.handle(), - destination_inner.offset, + dst_buffer_inner.buffer.handle(), + dst_buffer_inner.offset, stride, ash::vk::QueryResultFlags::from(flags) | T::FLAG, ); + let command_index = self.next_command_index; + let command_name = "copy_query_pool_results"; + let use_ref = ResourceUseRef { + command_index, + command_name, + resource_in_command: ResourceInCommand::Destination, + secondary_use_ref: None, + }; + + let mut dst_range = 0..dst_buffer.size(); // TODO: + dst_range.start += dst_buffer_inner.offset; + dst_range.end += dst_buffer_inner.offset; + self.resources_usage_state.record_buffer_access( + &use_ref, + dst_buffer_inner.buffer, + dst_range, + PipelineStageAccess::Copy_TransferWrite, + ); + self.resources.push(Box::new(query_pool)); - self.resources.push(Box::new(destination)); - - // TODO: sync state update + self.resources.push(Box::new(dst_buffer)); + self.next_command_index += 1; self } @@ -680,7 +696,7 @@ where queries: Range, ) -> Result<(), QueryError> { // VUID-vkCmdResetQueryPool-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(QueryError::ForbiddenInsideRenderPass); } @@ -707,7 +723,7 @@ where // VUID-vkCmdResetQueryPool-None-02841 if self - .current_state + .builder_state .queries .values() .any(|state| state.query_pool == query_pool.handle() && queries.contains(&state.query)) @@ -734,6 +750,7 @@ where self.resources.push(Box::new(query_pool)); + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/render_pass.rs b/vulkano/src/command_buffer/standard/builder/render_pass.rs index 81098915..14cf73fc 100644 --- a/vulkano/src/command_buffer/standard/builder/render_pass.rs +++ b/vulkano/src/command_buffer/standard/builder/render_pass.rs @@ -71,7 +71,7 @@ where } 
// VUID-vkCmdBeginRenderPass2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(RenderPassError::ForbiddenInsideRenderPass); } @@ -446,7 +446,7 @@ where let subpass = render_pass.clone().first_subpass(); let view_mask = subpass.subpass_desc().view_mask; - self.current_state.render_pass = Some(RenderPassState { + self.builder_state.render_pass = Some(RenderPassState { contents, render_area_offset, render_area_extent, @@ -463,6 +463,7 @@ where // TODO: sync state update + self.next_command_index += 1; self } @@ -491,7 +492,7 @@ where // VUID-vkCmdNextSubpass2-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(RenderPassError::ForbiddenOutsideRenderPass)?; @@ -512,7 +513,7 @@ where // VUID? if self - .current_state + .builder_state .queries .values() .any(|state| state.in_subpass) @@ -561,7 +562,7 @@ where (fns.v1_0.cmd_next_subpass)(self.handle(), subpass_begin_info.contents); } - let render_pass_state = self.current_state.render_pass.as_mut().unwrap(); + let render_pass_state = self.builder_state.render_pass.as_mut().unwrap(); let begin_render_pass_state = match &mut render_pass_state.render_pass { RenderPassStateType::BeginRenderPass(x) => x, _ => unreachable!(), @@ -574,11 +575,12 @@ where if render_pass_state.view_mask != 0 { // When multiview is enabled, at the beginning of each subpass, all // non-render pass state is undefined. - self.current_state = Default::default(); + self.builder_state = Default::default(); } // TODO: sync state update + self.next_command_index += 1; self } @@ -601,7 +603,7 @@ where fn validate_end_render_pass(&self) -> Result<(), RenderPassError> { // VUID-vkCmdEndRenderPass2-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(RenderPassError::ForbiddenOutsideRenderPass)?; @@ -628,7 +630,7 @@ where // VUID? 
if self - .current_state + .builder_state .queries .values() .any(|state| state.in_subpass) @@ -671,10 +673,11 @@ where (fns.v1_0.cmd_end_render_pass)(self.handle()); } - self.current_state.render_pass = None; + self.builder_state.render_pass = None; // TODO: sync state update + self.next_command_index += 1; self } } @@ -732,7 +735,7 @@ where } // VUID-vkCmdBeginRendering-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(RenderPassError::ForbiddenInsideRenderPass); } @@ -1360,7 +1363,7 @@ where (fns.khr_dynamic_rendering.cmd_begin_rendering_khr)(self.handle(), &rendering_info); } - self.current_state.render_pass = Some(RenderPassState { + self.builder_state.render_pass = Some(RenderPassState { contents, render_area_offset, render_area_extent, @@ -1460,6 +1463,7 @@ where // TODO: sync state update + self.next_command_index += 1; self } @@ -1480,7 +1484,7 @@ where fn validate_end_rendering(&self) -> Result<(), RenderPassError> { // VUID-vkCmdEndRendering-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(RenderPassError::ForbiddenOutsideRenderPass)?; @@ -1521,10 +1525,11 @@ where (fns.khr_dynamic_rendering.cmd_end_rendering_khr)(self.handle()); } - self.current_state.render_pass = None; + self.builder_state.render_pass = None; // TODO: sync state update + self.next_command_index += 1; self } @@ -1569,7 +1574,7 @@ where ) -> Result<(), RenderPassError> { // VUID-vkCmdClearAttachments-renderpass let render_pass_state = self - .current_state + .builder_state .render_pass .as_ref() .ok_or(RenderPassError::ForbiddenOutsideRenderPass)?; @@ -1829,6 +1834,7 @@ where // TODO: sync state update + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/secondary.rs b/vulkano/src/command_buffer/standard/builder/secondary.rs index e813c3be..2eb05040 100644 --- a/vulkano/src/command_buffer/standard/builder/secondary.rs +++ 
b/vulkano/src/command_buffer/standard/builder/secondary.rs @@ -67,7 +67,7 @@ where // TODO: // VUID-vkCmdExecuteCommands-pCommandBuffers-00094 - if let Some(render_pass_state) = &self.current_state.render_pass { + if let Some(render_pass_state) = &self.builder_state.render_pass { // VUID-vkCmdExecuteCommands-contents-06018 // VUID-vkCmdExecuteCommands-flags-06024 if render_pass_state.contents != SubpassContents::SecondaryCommandBuffers { @@ -261,7 +261,7 @@ where } // VUID-vkCmdExecuteCommands-commandBuffer-00101 - if !self.current_state.queries.is_empty() + if !self.builder_state.queries.is_empty() && !self.device().enabled_features().inherited_queries { return Err(ExecuteCommandsError::RequirementNotMet { @@ -273,7 +273,7 @@ where }); } - for state in self.current_state.queries.values() { + for state in self.builder_state.queries.values() { match state.ty { QueryType::Occlusion => { // VUID-vkCmdExecuteCommands-commandBuffer-00102 @@ -375,15 +375,19 @@ where (fns.v1_0.cmd_execute_commands)(self.handle(), 1, &command_buffer.handle()); // The secondary command buffer could leave the primary in any state. - self.current_state = Default::default(); + self.builder_state = Default::default(); // If the secondary is non-concurrent or one-time use, that restricts the primary as well. 
self.usage = std::cmp::min(self.usage, command_buffer.usage); - self.resources.push(Box::new(command_buffer)); + let _command_index = self.next_command_index; + let _command_name = "execute_commands"; // TODO: sync state update + self.resources.push(Box::new(command_buffer)); + + self.next_command_index += 1; self } } diff --git a/vulkano/src/command_buffer/standard/builder/sync.rs b/vulkano/src/command_buffer/standard/builder/sync.rs index 83fefc33..c6081949 100644 --- a/vulkano/src/command_buffer/standard/builder/sync.rs +++ b/vulkano/src/command_buffer/standard/builder/sync.rs @@ -999,7 +999,7 @@ where Checks for current render pass */ - if let Some(render_pass_state) = self.current_state.render_pass.as_ref() { + if let Some(render_pass_state) = self.builder_state.render_pass.as_ref() { // VUID-vkCmdPipelineBarrier2-None-06191 let begin_render_pass_state = match &render_pass_state.render_pass { RenderPassStateType::BeginRenderPass(x) => x, @@ -1088,13 +1088,13 @@ where return self; } - let DependencyInfo { + let &DependencyInfo { dependency_flags, - memory_barriers, - buffer_memory_barriers, - image_memory_barriers, + ref memory_barriers, + ref buffer_memory_barriers, + ref image_memory_barriers, _ne: _, - } = dependency_info; + } = &dependency_info; if self.device().enabled_features().synchronization2 { let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers @@ -1341,43 +1341,27 @@ where ); } + let command_index = self.next_command_index; + let command_name = "pipeline_barrier"; + self.resources_usage_state.record_pipeline_barrier( + command_index, + command_name, + &dependency_info, + self.queue_family_properties().queue_flags, + ); + self.resources .reserve(buffer_memory_barriers.len() + image_memory_barriers.len()); - for barrier in buffer_memory_barriers { - let BufferMemoryBarrier { - src_stages: _, - src_access: _, - dst_stages: _, - dst_access: _, - queue_family_ownership_transfer: _, // TODO: - buffer, - range: _, - _ne: _, - } = barrier; - - 
self.resources.push(Box::new(buffer)); + for barrier in dependency_info.buffer_memory_barriers { + self.resources.push(Box::new(barrier.buffer)); } - for barrier in image_memory_barriers { - let ImageMemoryBarrier { - src_stages: _, - src_access: _, - dst_stages: _, - dst_access: _, - old_layout: _, // TODO: - new_layout: _, // TODO: - queue_family_ownership_transfer: _, // TODO: - image, - subresource_range: _, - _ne: _, - } = barrier; - - self.resources.push(Box::new(image)); + for barrier in dependency_info.image_memory_barriers { + self.resources.push(Box::new(barrier.image)); } - // TODO: sync state update - + self.next_command_index += 1; self } @@ -1406,7 +1390,7 @@ where dependency_info: &DependencyInfo, ) -> Result<(), SynchronizationError> { // VUID-vkCmdSetEvent2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(SynchronizationError::ForbiddenInsideRenderPass); } @@ -2508,6 +2492,7 @@ where // TODO: sync state update + self.next_command_index += 1; self } @@ -2981,7 +2966,7 @@ where } // VUID-vkCmdWaitEvents2-dependencyFlags-03844 - if self.current_state.render_pass.is_some() + if self.builder_state.render_pass.is_some() && src_stages.intersects(PipelineStages::HOST) { todo!() @@ -3830,6 +3815,7 @@ where // TODO: sync state update + self.next_command_index += 1; self } @@ -3859,7 +3845,7 @@ where stages: PipelineStages, ) -> Result<(), SynchronizationError> { // VUID-vkCmdResetEvent2-renderpass - if self.current_state.render_pass.is_some() { + if self.builder_state.render_pass.is_some() { return Err(SynchronizationError::ForbiddenInsideRenderPass); } @@ -4081,6 +4067,7 @@ where // TODO: sync state update + self.next_command_index += 1; self } } diff --git a/vulkano/src/image/layout.rs b/vulkano/src/image/layout.rs index c60fb1ba..16f4bc04 100644 --- a/vulkano/src/image/layout.rs +++ b/vulkano/src/image/layout.rs @@ -199,6 +199,13 @@ vulkan_enum! 
{ },*/ } +impl Default for ImageLayout { + #[inline] + fn default() -> Self { + ImageLayout::Undefined + } +} + /// The set of layouts to use for an image when used in descriptor of various kinds. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ImageDescriptorLayouts { diff --git a/vulkano/src/sync/mod.rs b/vulkano/src/sync/mod.rs index bbc31919..512c02e2 100644 --- a/vulkano/src/sync/mod.rs +++ b/vulkano/src/sync/mod.rs @@ -16,6 +16,7 @@ //! This safety is enforced at runtime by vulkano but it is not magic and you will require some //! knowledge if you want to avoid errors. +pub(crate) use self::pipeline::{PipelineStageAccess, PipelineStageAccessSet}; pub use self::{ future::{now, FlushError, GpuFuture}, pipeline::{ diff --git a/vulkano/src/sync/pipeline.rs b/vulkano/src/sync/pipeline.rs index a528cfa6..30162e45 100644 --- a/vulkano/src/sync/pipeline.rs +++ b/vulkano/src/sync/pipeline.rs @@ -9,11 +9,15 @@ use crate::{ buffer::sys::Buffer, + descriptor_set::layout::DescriptorType, device::{Device, QueueFlags}, image::{sys::Image, ImageAspects, ImageLayout, ImageSubresourceRange}, macros::{vulkan_bitflags, vulkan_bitflags_enum}, + shader::ShaderStages, DeviceSize, RequirementNotMet, Version, }; +use ahash::HashMap; +use once_cell::sync::Lazy; use smallvec::SmallVec; use std::{ops::Range, sync::Arc}; @@ -95,6 +99,28 @@ vulkan_bitflags_enum! { self } + + pub(crate) fn with_earlier(self) -> Self { + STAGE_ORDER.iter().rev().fold( + self, + |stages, &(before, after)| if stages.intersects(after) { + stages.union(before) + } else { + stages + } + ) + } + + pub(crate) fn with_later(self) -> Self { + STAGE_ORDER.iter().fold( + self, + |stages, &(before, after)| if stages.intersects(before) { + stages.union(after) + } else { + stages + } + ) + } }, /// A single stage in the device's processing pipeline. @@ -336,6 +362,125 @@ vulkan_bitflags_enum! { }, } +macro_rules! 
stage_order { + { + $(( + $($before:ident)|+, + $($after:ident)|+, + ),)+ + } => { + static STAGE_ORDER: [(PipelineStages, PipelineStages); 15] = [ + $( + ( + PipelineStages::empty() + $(.union(PipelineStages::$before))+ + , + PipelineStages::empty() + $(.union(PipelineStages::$after))+ + ), + )+ + ]; + }; +} + +// Per +// https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-types +stage_order! { + ( + TOP_OF_PIPE, + DRAW_INDIRECT + | COPY | RESOLVE | BLIT | CLEAR + | VIDEO_DECODE | VIDEO_ENCODE + | CONDITIONAL_RENDERING + | COMMAND_PREPROCESS + | ACCELERATION_STRUCTURE_BUILD + | SUBPASS_SHADING + | ACCELERATION_STRUCTURE_COPY + | MICROMAP_BUILD + | OPTICAL_FLOW, + ), + + ( + DRAW_INDIRECT, + COMPUTE_SHADER | INDEX_INPUT | RAY_TRACING_SHADER | TASK_SHADER, + ), + + ( + INDEX_INPUT, + VERTEX_ATTRIBUTE_INPUT, + ), + + ( + VERTEX_ATTRIBUTE_INPUT, + VERTEX_SHADER, + ), + + ( + VERTEX_SHADER, + TESSELLATION_CONTROL_SHADER, + ), + + ( + TESSELLATION_CONTROL_SHADER, + TESSELLATION_EVALUATION_SHADER, + ), + + ( + TESSELLATION_EVALUATION_SHADER, + GEOMETRY_SHADER, + ), + + ( + GEOMETRY_SHADER, + TRANSFORM_FEEDBACK, + ), + + ( + TASK_SHADER, + MESH_SHADER, + ), + + ( + TRANSFORM_FEEDBACK | MESH_SHADER, + FRAGMENT_SHADING_RATE_ATTACHMENT, + ), + + ( + FRAGMENT_DENSITY_PROCESS | FRAGMENT_SHADING_RATE_ATTACHMENT, + EARLY_FRAGMENT_TESTS, + ), + + ( + EARLY_FRAGMENT_TESTS, + FRAGMENT_SHADER, + ), + + ( + FRAGMENT_SHADER, + LATE_FRAGMENT_TESTS, + ), + + ( + LATE_FRAGMENT_TESTS, + COLOR_ATTACHMENT_OUTPUT, + ), + + ( + COLOR_ATTACHMENT_OUTPUT + | COMPUTE_SHADER + | COPY | RESOLVE | BLIT | CLEAR + | VIDEO_DECODE | VIDEO_ENCODE + | CONDITIONAL_RENDERING + | COMMAND_PREPROCESS + | ACCELERATION_STRUCTURE_BUILD | RAY_TRACING_SHADER + | SUBPASS_SHADING + | ACCELERATION_STRUCTURE_COPY + | MICROMAP_BUILD + | OPTICAL_FLOW, + BOTTOM_OF_PIPE, + ), +} + impl From for PipelineStages { /// Corresponds to the table "[Supported pipeline stage 
flags]" in the Vulkan specification. /// @@ -726,8 +871,7 @@ impl From for AccessFlags { | AccessFlags::SHADER_STORAGE_READ | AccessFlags::SHADER_WRITE | AccessFlags::SHADER_STORAGE_WRITE - | AccessFlags::ACCELERATION_STRUCTURE_READ - | AccessFlags::SHADER_BINDING_TABLE_READ; + | AccessFlags::ACCELERATION_STRUCTURE_READ; } if val.intersects(PipelineStages::FRAGMENT_SHADER | PipelineStages::SUBPASS_SHADING) { @@ -800,7 +944,6 @@ impl From for AccessFlags { | AccessFlags::TRANSFER_WRITE | AccessFlags::ACCELERATION_STRUCTURE_READ | AccessFlags::ACCELERATION_STRUCTURE_WRITE - | AccessFlags::SHADER_BINDING_TABLE_READ | AccessFlags::MICROMAP_READ; } @@ -854,6 +997,1206 @@ pub struct PipelineMemoryAccess { pub exclusive: bool, } +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[allow(non_camel_case_types, dead_code)] +#[repr(u8)] +pub(crate) enum PipelineStageAccess { + // There is no stage/access for this, but it is a memory write operation nonetheless. + ImageLayoutTransition, + + DrawIndirect_IndirectCommandRead, + DrawIndirect_TransformFeedbackCounterRead, + VertexShader_UniformRead, + VertexShader_ShaderSampledRead, + VertexShader_ShaderStorageRead, + VertexShader_ShaderStorageWrite, + VertexShader_AccelerationStructureRead, + TessellationControlShader_UniformRead, + TessellationControlShader_ShaderSampledRead, + TessellationControlShader_ShaderStorageRead, + TessellationControlShader_ShaderStorageWrite, + TessellationControlShader_AccelerationStructureRead, + TessellationEvaluationShader_UniformRead, + TessellationEvaluationShader_ShaderSampledRead, + TessellationEvaluationShader_ShaderStorageRead, + TessellationEvaluationShader_ShaderStorageWrite, + TessellationEvaluationShader_AccelerationStructureRead, + GeometryShader_UniformRead, + GeometryShader_ShaderSampledRead, + GeometryShader_ShaderStorageRead, + GeometryShader_ShaderStorageWrite, + GeometryShader_AccelerationStructureRead, + FragmentShader_UniformRead, + FragmentShader_InputAttachmentRead, + 
FragmentShader_ShaderSampledRead, + FragmentShader_ShaderStorageRead, + FragmentShader_ShaderStorageWrite, + FragmentShader_AccelerationStructureRead, + EarlyFragmentTests_DepthStencilAttachmentRead, + EarlyFragmentTests_DepthStencilAttachmentWrite, + LateFragmentTests_DepthStencilAttachmentRead, + LateFragmentTests_DepthStencilAttachmentWrite, + ColorAttachmentOutput_ColorAttachmentRead, + ColorAttachmentOutput_ColorAttachmentWrite, + ColorAttachmentOutput_ColorAttachmentReadNoncoherent, + ComputeShader_UniformRead, + ComputeShader_ShaderSampledRead, + ComputeShader_ShaderStorageRead, + ComputeShader_ShaderStorageWrite, + ComputeShader_AccelerationStructureRead, + Host_HostRead, + Host_HostWrite, + Copy_TransferRead, + Copy_TransferWrite, + Resolve_TransferRead, + Resolve_TransferWrite, + Blit_TransferRead, + Blit_TransferWrite, + Clear_TransferWrite, + IndexInput_IndexRead, + VertexAttributeInput_VertexAttributeRead, + VideoDecode_VideoDecodeRead, + VideoDecode_VideoDecodeWrite, + VideoEncode_VideoEncodeRead, + VideoEncode_VideoEncodeWrite, + TransformFeedback_TransformFeedbackWrite, + TransformFeedback_TransformFeedbackCounterRead, + TransformFeedback_TransformFeedbackCounterWrite, + ConditionalRendering_ConditionalRenderingRead, + AccelerationStructureBuild_IndirectCommandRead, + AccelerationStructureBuild_UniformRead, + AccelerationStructureBuild_TransferRead, + AccelerationStructureBuild_TransferWrite, + AccelerationStructureBuild_ShaderSampledRead, + AccelerationStructureBuild_ShaderStorageRead, + AccelerationStructureBuild_AccelerationStructureRead, + AccelerationStructureBuild_AccelerationStructureWrite, + AccelerationStructureBuild_MicromapRead, + RayTracingShader_UniformRead, + RayTracingShader_ShaderSampledRead, + RayTracingShader_ShaderStorageRead, + RayTracingShader_ShaderStorageWrite, + RayTracingShader_AccelerationStructureRead, + RayTracingShader_ShaderBindingTableRead, + FragmentDensityProcess_FragmentDensityMapRead, + 
FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead, + CommandPreprocess_CommandPreprocessRead, + CommandPreprocess_CommandPreprocessWrite, + TaskShader_UniformRead, + TaskShader_ShaderSampledRead, + TaskShader_ShaderStorageRead, + TaskShader_ShaderStorageWrite, + TaskShader_AccelerationStructureRead, + MeshShader_UniformRead, + MeshShader_ShaderSampledRead, + MeshShader_ShaderStorageRead, + MeshShader_ShaderStorageWrite, + MeshShader_AccelerationStructureRead, + SubpassShading_InputAttachmentRead, + InvocationMask_InvocationMaskRead, + AccelerationStructureCopy_TransferRead, + AccelerationStructureCopy_TransferWrite, + OpticalFlow_OpticalFlowRead, + OpticalFlow_OpticalFlowWrite, + MicromapBuild_MicromapRead, + MicromapBuild_MicromapWrite, + + // If there are ever more than 128 preceding values, then there will be a compile error: + // "discriminant value `128` assigned more than once" + __MAX_VALUE__ = 128, +} + +impl PipelineStageAccess { + #[inline] + pub(crate) const fn is_write(self) -> bool { + matches!( + self, + PipelineStageAccess::ImageLayoutTransition + | PipelineStageAccess::VertexShader_ShaderStorageWrite + | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite + | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite + | PipelineStageAccess::GeometryShader_ShaderStorageWrite + | PipelineStageAccess::FragmentShader_ShaderStorageWrite + | PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite + | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite + | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite + | PipelineStageAccess::ComputeShader_ShaderStorageWrite + | PipelineStageAccess::Host_HostWrite + | PipelineStageAccess::Copy_TransferWrite + | PipelineStageAccess::Resolve_TransferWrite + | PipelineStageAccess::Blit_TransferWrite + | PipelineStageAccess::Clear_TransferWrite + | PipelineStageAccess::VideoDecode_VideoDecodeWrite + | 
PipelineStageAccess::VideoEncode_VideoEncodeWrite + | PipelineStageAccess::TransformFeedback_TransformFeedbackWrite + | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite + | PipelineStageAccess::AccelerationStructureBuild_TransferWrite + | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite + | PipelineStageAccess::RayTracingShader_ShaderStorageWrite + | PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite + | PipelineStageAccess::TaskShader_ShaderStorageWrite + | PipelineStageAccess::MeshShader_ShaderStorageWrite + | PipelineStageAccess::AccelerationStructureCopy_TransferWrite + | PipelineStageAccess::OpticalFlow_OpticalFlowWrite + | PipelineStageAccess::MicromapBuild_MicromapWrite + ) + } + + pub(crate) fn iter_descriptor_stages( + descriptor_type: DescriptorType, + stages_read: ShaderStages, + stages_write: ShaderStages, + ) -> impl Iterator<Item = PipelineStageAccess> + 'static { + static MAP_READ: Lazy< + HashMap<DescriptorType, HashMap<PipelineStage, PipelineStageAccess>>, + > = Lazy::new(|| { + let uniform_read = [ + DescriptorType::UniformBuffer, + DescriptorType::UniformBufferDynamic, + ] + .into_iter() + .map(|descriptor_type| { + ( + descriptor_type, + [ + ( + PipelineStage::VertexShader, + PipelineStageAccess::VertexShader_UniformRead, + ), + ( + PipelineStage::TessellationControlShader, + PipelineStageAccess::TessellationControlShader_UniformRead, + ), + ( + PipelineStage::TessellationEvaluationShader, + PipelineStageAccess::TessellationEvaluationShader_UniformRead, + ), + ( + PipelineStage::GeometryShader, + PipelineStageAccess::GeometryShader_UniformRead, + ), + ( + PipelineStage::FragmentShader, + PipelineStageAccess::FragmentShader_UniformRead, + ), + ( + PipelineStage::ComputeShader, + PipelineStageAccess::ComputeShader_UniformRead, + ), + ( + PipelineStage::RayTracingShader, + PipelineStageAccess::RayTracingShader_UniformRead, + ), + ( + PipelineStage::TaskShader, + PipelineStageAccess::TaskShader_UniformRead, + ), + ( + PipelineStage::MeshShader, + 
PipelineStageAccess::MeshShader_UniformRead, + ), + ] + .into_iter() + .collect(), + ) + }); + + let shader_sampled_read = [ + DescriptorType::CombinedImageSampler, + DescriptorType::SampledImage, + DescriptorType::UniformTexelBuffer, + ] + .into_iter() + .map(|descriptor_type| { + ( + descriptor_type, + [ + ( + PipelineStage::VertexShader, + PipelineStageAccess::VertexShader_ShaderSampledRead, + ), + ( + PipelineStage::TessellationControlShader, + PipelineStageAccess::TessellationControlShader_ShaderSampledRead, + ), + ( + PipelineStage::TessellationEvaluationShader, + PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead, + ), + ( + PipelineStage::GeometryShader, + PipelineStageAccess::GeometryShader_ShaderSampledRead, + ), + ( + PipelineStage::FragmentShader, + PipelineStageAccess::FragmentShader_ShaderSampledRead, + ), + ( + PipelineStage::ComputeShader, + PipelineStageAccess::ComputeShader_ShaderSampledRead, + ), + ( + PipelineStage::RayTracingShader, + PipelineStageAccess::RayTracingShader_ShaderSampledRead, + ), + ( + PipelineStage::TaskShader, + PipelineStageAccess::TaskShader_ShaderSampledRead, + ), + ( + PipelineStage::MeshShader, + PipelineStageAccess::MeshShader_ShaderSampledRead, + ), + ] + .into_iter() + .collect(), + ) + }); + + let shader_storage_read = [ + DescriptorType::StorageImage, + DescriptorType::StorageTexelBuffer, + DescriptorType::StorageBuffer, + DescriptorType::StorageBufferDynamic, + ] + .into_iter() + .map(|descriptor_type| { + ( + descriptor_type, + [ + ( + PipelineStage::VertexShader, + PipelineStageAccess::VertexShader_ShaderStorageRead, + ), + ( + PipelineStage::TessellationControlShader, + PipelineStageAccess::TessellationControlShader_ShaderStorageRead, + ), + ( + PipelineStage::TessellationEvaluationShader, + PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead, + ), + ( + PipelineStage::GeometryShader, + PipelineStageAccess::GeometryShader_ShaderStorageRead, + ), + ( + PipelineStage::FragmentShader, + 
PipelineStageAccess::FragmentShader_ShaderStorageRead, + ), + ( + PipelineStage::ComputeShader, + PipelineStageAccess::ComputeShader_ShaderStorageRead, + ), + ( + PipelineStage::RayTracingShader, + PipelineStageAccess::RayTracingShader_ShaderStorageRead, + ), + ( + PipelineStage::TaskShader, + PipelineStageAccess::TaskShader_ShaderStorageRead, + ), + ( + PipelineStage::MeshShader, + PipelineStageAccess::MeshShader_ShaderStorageRead, + ), + ] + .into_iter() + .collect(), + ) + }); + + let input_attachment_read = + [DescriptorType::InputAttachment] + .into_iter() + .map(|descriptor_type| { + ( + descriptor_type, + [( + PipelineStage::FragmentShader, + PipelineStageAccess::FragmentShader_InputAttachmentRead, + )] + .into_iter() + .collect(), + ) + }); + + uniform_read + .chain(shader_sampled_read) + .chain(shader_storage_read) + .chain(input_attachment_read) + .collect() + }); + static MAP_WRITE: Lazy< + HashMap<DescriptorType, HashMap<PipelineStage, PipelineStageAccess>>, + > = Lazy::new(|| { + let shader_storage_write = [ + DescriptorType::StorageImage, + DescriptorType::StorageTexelBuffer, + DescriptorType::StorageBuffer, + DescriptorType::StorageBufferDynamic, + ] + .into_iter() + .map(|descriptor_type| { + ( + descriptor_type, + [ + ( + PipelineStage::VertexShader, + PipelineStageAccess::VertexShader_ShaderStorageWrite, + ), + ( + PipelineStage::TessellationControlShader, + PipelineStageAccess::TessellationControlShader_ShaderStorageWrite, + ), + ( + PipelineStage::TessellationEvaluationShader, + PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite, + ), + ( + PipelineStage::GeometryShader, + PipelineStageAccess::GeometryShader_ShaderStorageWrite, + ), + ( + PipelineStage::FragmentShader, + PipelineStageAccess::FragmentShader_ShaderStorageWrite, + ), + ( + PipelineStage::ComputeShader, + PipelineStageAccess::ComputeShader_ShaderStorageWrite, + ), + ( + PipelineStage::RayTracingShader, + PipelineStageAccess::RayTracingShader_ShaderStorageWrite, + ), + ( + PipelineStage::TaskShader, + 
PipelineStageAccess::TaskShader_ShaderStorageWrite, + ), + ( + PipelineStage::MeshShader, + PipelineStageAccess::MeshShader_ShaderStorageWrite, + ), + ] + .into_iter() + .collect(), + ) + }); + + shader_storage_write.collect() + }); + + [ + (stages_read, &*MAP_READ, "read"), + (stages_write, &*MAP_WRITE, "write"), + ] + .into_iter() + .filter(|(stages, _, _)| !stages.is_empty()) + .flat_map(move |(stages, descriptor_map, access)| { + let stages_map = descriptor_map.get(&descriptor_type).unwrap_or_else(|| { + panic!( + "DescriptorType::{:?} does not {} memory", + descriptor_type, access, + ) + }); + + PipelineStages::from(stages).into_iter().map(move |stage| { + *stages_map.get(&stage).unwrap_or_else(|| { + panic!( + "DescriptorType::{:?} does not {} memory in PipelineStage::{:?}", + descriptor_type, access, stage, + ) + }) + }) + }) + } +} + +impl TryFrom for PipelineStage { + type Error = (); + + #[inline] + fn try_from(val: PipelineStageAccess) -> Result { + Ok(match val { + PipelineStageAccess::ImageLayoutTransition => return Err(()), + PipelineStageAccess::DrawIndirect_IndirectCommandRead + | PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead => PipelineStage::DrawIndirect, + PipelineStageAccess::VertexShader_UniformRead + | PipelineStageAccess::VertexShader_ShaderSampledRead + | PipelineStageAccess::VertexShader_ShaderStorageRead + | PipelineStageAccess::VertexShader_ShaderStorageWrite + | PipelineStageAccess::VertexShader_AccelerationStructureRead => PipelineStage::VertexShader, + PipelineStageAccess::TessellationControlShader_UniformRead + | PipelineStageAccess::TessellationControlShader_ShaderSampledRead + | PipelineStageAccess::TessellationControlShader_ShaderStorageRead + | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite + | PipelineStageAccess::TessellationControlShader_AccelerationStructureRead => PipelineStage::TessellationControlShader, + PipelineStageAccess::TessellationEvaluationShader_UniformRead + | 
PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead + | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead + | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite + | PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead => PipelineStage::TessellationEvaluationShader, + PipelineStageAccess::GeometryShader_UniformRead + | PipelineStageAccess::GeometryShader_ShaderSampledRead + | PipelineStageAccess::GeometryShader_ShaderStorageRead + | PipelineStageAccess::GeometryShader_ShaderStorageWrite + | PipelineStageAccess::GeometryShader_AccelerationStructureRead => PipelineStage::GeometryShader, + PipelineStageAccess::FragmentShader_UniformRead + | PipelineStageAccess::FragmentShader_InputAttachmentRead + | PipelineStageAccess::FragmentShader_ShaderSampledRead + | PipelineStageAccess::FragmentShader_ShaderStorageRead + | PipelineStageAccess::FragmentShader_ShaderStorageWrite + | PipelineStageAccess::FragmentShader_AccelerationStructureRead => PipelineStage::FragmentShader, + PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead + | PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite => PipelineStage::EarlyFragmentTests, + PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead + | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite => PipelineStage::LateFragmentTests, + PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead + | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite + | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent => PipelineStage::ColorAttachmentOutput, + PipelineStageAccess::ComputeShader_UniformRead + | PipelineStageAccess::ComputeShader_ShaderSampledRead + | PipelineStageAccess::ComputeShader_ShaderStorageRead + | PipelineStageAccess::ComputeShader_ShaderStorageWrite + | PipelineStageAccess::ComputeShader_AccelerationStructureRead => PipelineStage::ComputeShader, + 
PipelineStageAccess::Host_HostRead + | PipelineStageAccess::Host_HostWrite => PipelineStage::Host, + PipelineStageAccess::Copy_TransferRead + | PipelineStageAccess::Copy_TransferWrite => PipelineStage::Copy, + PipelineStageAccess::Resolve_TransferRead + | PipelineStageAccess::Resolve_TransferWrite => PipelineStage::Resolve, + PipelineStageAccess::Blit_TransferRead + | PipelineStageAccess::Blit_TransferWrite => PipelineStage::Blit, + PipelineStageAccess::Clear_TransferWrite => PipelineStage::Clear, + PipelineStageAccess::IndexInput_IndexRead => PipelineStage::IndexInput, + PipelineStageAccess::VertexAttributeInput_VertexAttributeRead => PipelineStage::VertexAttributeInput, + PipelineStageAccess::VideoDecode_VideoDecodeRead + | PipelineStageAccess::VideoDecode_VideoDecodeWrite => PipelineStage::VideoDecode, + PipelineStageAccess::VideoEncode_VideoEncodeRead + | PipelineStageAccess::VideoEncode_VideoEncodeWrite => PipelineStage::VideoEncode, + PipelineStageAccess::TransformFeedback_TransformFeedbackWrite + | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead + | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite => PipelineStage::TransformFeedback, + PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead => PipelineStage::ConditionalRendering, + PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead + | PipelineStageAccess::AccelerationStructureBuild_UniformRead + | PipelineStageAccess::AccelerationStructureBuild_TransferRead + | PipelineStageAccess::AccelerationStructureBuild_TransferWrite + | PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead + | PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead + | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead + | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite + | PipelineStageAccess::AccelerationStructureBuild_MicromapRead => PipelineStage::AccelerationStructureBuild, + 
PipelineStageAccess::RayTracingShader_UniformRead + | PipelineStageAccess::RayTracingShader_ShaderSampledRead + | PipelineStageAccess::RayTracingShader_ShaderStorageRead + | PipelineStageAccess::RayTracingShader_ShaderStorageWrite + | PipelineStageAccess::RayTracingShader_AccelerationStructureRead => PipelineStage::RayTracingShader, + | PipelineStageAccess::RayTracingShader_ShaderBindingTableRead => PipelineStage::RayTracingShader, + PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead => PipelineStage::FragmentDensityProcess, + PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead => PipelineStage::FragmentShadingRateAttachment, + PipelineStageAccess::CommandPreprocess_CommandPreprocessRead + | PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite => PipelineStage::CommandPreprocess, + PipelineStageAccess::TaskShader_UniformRead + | PipelineStageAccess::TaskShader_ShaderSampledRead + | PipelineStageAccess::TaskShader_ShaderStorageRead + | PipelineStageAccess::TaskShader_ShaderStorageWrite + | PipelineStageAccess::TaskShader_AccelerationStructureRead => PipelineStage::TaskShader, + PipelineStageAccess::MeshShader_UniformRead + | PipelineStageAccess::MeshShader_ShaderSampledRead + | PipelineStageAccess::MeshShader_ShaderStorageRead + | PipelineStageAccess::MeshShader_ShaderStorageWrite + | PipelineStageAccess::MeshShader_AccelerationStructureRead => PipelineStage::MeshShader, + PipelineStageAccess::SubpassShading_InputAttachmentRead => PipelineStage::SubpassShading, + PipelineStageAccess::InvocationMask_InvocationMaskRead => PipelineStage::InvocationMask, + PipelineStageAccess::AccelerationStructureCopy_TransferRead + | PipelineStageAccess::AccelerationStructureCopy_TransferWrite => PipelineStage::AccelerationStructureCopy, + PipelineStageAccess::OpticalFlow_OpticalFlowRead + | PipelineStageAccess::OpticalFlow_OpticalFlowWrite => PipelineStage::OpticalFlow, + PipelineStageAccess::MicromapBuild_MicromapRead + | 
PipelineStageAccess::MicromapBuild_MicromapWrite => PipelineStage::MicromapBuild, + PipelineStageAccess::__MAX_VALUE__ => unreachable!(), + }) + } +} + +impl From for AccessFlags { + #[inline] + fn from(val: PipelineStageAccess) -> Self { + match val { + PipelineStageAccess::ImageLayoutTransition => AccessFlags::empty(), + PipelineStageAccess::DrawIndirect_IndirectCommandRead + | PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead => AccessFlags::INDIRECT_COMMAND_READ, + PipelineStageAccess::IndexInput_IndexRead => AccessFlags::INDEX_READ, + PipelineStageAccess::VertexAttributeInput_VertexAttributeRead => AccessFlags::VERTEX_ATTRIBUTE_READ, + PipelineStageAccess::VertexShader_UniformRead + | PipelineStageAccess::TessellationControlShader_UniformRead + | PipelineStageAccess::TessellationEvaluationShader_UniformRead + | PipelineStageAccess::GeometryShader_UniformRead + | PipelineStageAccess::FragmentShader_UniformRead + | PipelineStageAccess::ComputeShader_UniformRead + | PipelineStageAccess::AccelerationStructureBuild_UniformRead + | PipelineStageAccess::RayTracingShader_UniformRead + | PipelineStageAccess::TaskShader_UniformRead + | PipelineStageAccess::MeshShader_UniformRead => AccessFlags::UNIFORM_READ, + PipelineStageAccess::FragmentShader_InputAttachmentRead + | PipelineStageAccess::SubpassShading_InputAttachmentRead => AccessFlags::INPUT_ATTACHMENT_READ, + PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead => AccessFlags::COLOR_ATTACHMENT_READ, + PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite => AccessFlags::COLOR_ATTACHMENT_WRITE, + PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead + | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead => AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ, + PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite + | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite => AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE, + 
PipelineStageAccess::Copy_TransferRead + | PipelineStageAccess::Resolve_TransferRead + | PipelineStageAccess::Blit_TransferRead + | PipelineStageAccess::AccelerationStructureBuild_TransferRead + | PipelineStageAccess::AccelerationStructureCopy_TransferRead => AccessFlags::TRANSFER_READ, + PipelineStageAccess::Copy_TransferWrite + | PipelineStageAccess::Resolve_TransferWrite + | PipelineStageAccess::Blit_TransferWrite + | PipelineStageAccess::Clear_TransferWrite + | PipelineStageAccess::AccelerationStructureBuild_TransferWrite + | PipelineStageAccess::AccelerationStructureCopy_TransferWrite => AccessFlags::TRANSFER_WRITE, + PipelineStageAccess::Host_HostRead => AccessFlags::HOST_READ, + PipelineStageAccess::Host_HostWrite => AccessFlags::HOST_WRITE, + PipelineStageAccess::VertexShader_ShaderSampledRead + | PipelineStageAccess::TessellationControlShader_ShaderSampledRead + | PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead + | PipelineStageAccess::GeometryShader_ShaderSampledRead + | PipelineStageAccess::FragmentShader_ShaderSampledRead + | PipelineStageAccess::ComputeShader_ShaderSampledRead + | PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead + | PipelineStageAccess::RayTracingShader_ShaderSampledRead + | PipelineStageAccess::TaskShader_ShaderSampledRead + | PipelineStageAccess::MeshShader_ShaderSampledRead => AccessFlags::SHADER_SAMPLED_READ, + PipelineStageAccess::VertexShader_ShaderStorageRead + | PipelineStageAccess::TessellationControlShader_ShaderStorageRead + | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead + | PipelineStageAccess::GeometryShader_ShaderStorageRead + | PipelineStageAccess::FragmentShader_ShaderStorageRead + | PipelineStageAccess::ComputeShader_ShaderStorageRead + | PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead + | PipelineStageAccess::RayTracingShader_ShaderStorageRead + | PipelineStageAccess::TaskShader_ShaderStorageRead + | 
PipelineStageAccess::MeshShader_ShaderStorageRead => AccessFlags::SHADER_STORAGE_READ, + PipelineStageAccess::VertexShader_ShaderStorageWrite + | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite + | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite + | PipelineStageAccess::GeometryShader_ShaderStorageWrite + | PipelineStageAccess::FragmentShader_ShaderStorageWrite + | PipelineStageAccess::ComputeShader_ShaderStorageWrite + | PipelineStageAccess::RayTracingShader_ShaderStorageWrite + | PipelineStageAccess::TaskShader_ShaderStorageWrite + | PipelineStageAccess::MeshShader_ShaderStorageWrite => AccessFlags::SHADER_STORAGE_WRITE, + PipelineStageAccess::VideoDecode_VideoDecodeRead => AccessFlags::VIDEO_DECODE_READ, + PipelineStageAccess::VideoDecode_VideoDecodeWrite => AccessFlags::VIDEO_DECODE_WRITE, + PipelineStageAccess::VideoEncode_VideoEncodeRead => AccessFlags::VIDEO_ENCODE_READ, + PipelineStageAccess::VideoEncode_VideoEncodeWrite => AccessFlags::VIDEO_ENCODE_WRITE, + PipelineStageAccess::TransformFeedback_TransformFeedbackWrite => AccessFlags::TRANSFORM_FEEDBACK_WRITE, + PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead + | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead => AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ, + PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite => AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE, + PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead => AccessFlags::CONDITIONAL_RENDERING_READ, + PipelineStageAccess::CommandPreprocess_CommandPreprocessRead => AccessFlags::COMMAND_PREPROCESS_READ, + PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite => AccessFlags::COMMAND_PREPROCESS_WRITE, + PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead => AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ, + PipelineStageAccess::VertexShader_AccelerationStructureRead + | 
PipelineStageAccess::TessellationControlShader_AccelerationStructureRead + | PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead + | PipelineStageAccess::GeometryShader_AccelerationStructureRead + | PipelineStageAccess::FragmentShader_AccelerationStructureRead + | PipelineStageAccess::ComputeShader_AccelerationStructureRead + | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead + | PipelineStageAccess::RayTracingShader_AccelerationStructureRead + | PipelineStageAccess::TaskShader_AccelerationStructureRead + | PipelineStageAccess::MeshShader_AccelerationStructureRead => AccessFlags::ACCELERATION_STRUCTURE_READ, + PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite => AccessFlags::ACCELERATION_STRUCTURE_WRITE, + PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead => AccessFlags::FRAGMENT_DENSITY_MAP_READ, + PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent => AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT, + PipelineStageAccess::InvocationMask_InvocationMaskRead => AccessFlags::INVOCATION_MASK_READ, + PipelineStageAccess::RayTracingShader_ShaderBindingTableRead => AccessFlags::SHADER_BINDING_TABLE_READ, + PipelineStageAccess::AccelerationStructureBuild_MicromapRead + | PipelineStageAccess::MicromapBuild_MicromapRead => AccessFlags::MICROMAP_READ, + PipelineStageAccess::MicromapBuild_MicromapWrite => AccessFlags::MICROMAP_WRITE, + PipelineStageAccess::OpticalFlow_OpticalFlowRead => AccessFlags::OPTICAL_FLOW_READ, + PipelineStageAccess::OpticalFlow_OpticalFlowWrite => AccessFlags::OPTICAL_FLOW_WRITE, + PipelineStageAccess::__MAX_VALUE__ => unreachable!(), + } + } +} + +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)] +pub(crate) struct PipelineStageAccessSet(u128); + +#[allow(dead_code)] +impl PipelineStageAccessSet { + #[inline] + pub(crate) const fn empty() -> Self { + Self(0) + } + + #[inline] + pub(crate) const fn count(self) -> u32 { + 
self.0.count_ones() + } + + #[inline] + pub(crate) const fn is_empty(self) -> bool { + self.0 == 0 + } + + #[inline] + pub(crate) const fn intersects(self, other: Self) -> bool { + self.0 & other.0 != 0 + } + + #[inline] + pub(crate) const fn contains(self, other: Self) -> bool { + self.0 & other.0 == other.0 + } + + #[inline] + pub(crate) const fn union(self, other: Self) -> Self { + Self(self.0 | other.0) + } + + #[inline] + pub(crate) const fn intersection(self, other: Self) -> Self { + Self(self.0 & other.0) + } + + #[inline] + pub(crate) const fn difference(self, other: Self) -> Self { + Self(self.0 & !other.0) + } + + #[inline] + pub(crate) const fn symmetric_difference(self, other: Self) -> Self { + Self(self.0 ^ other.0) + } + + #[inline] + pub(crate) fn contains_enum(self, val: PipelineStageAccess) -> bool { + self.intersects(val.into()) + } +} + +impl std::ops::BitAnd for PipelineStageAccessSet { + type Output = Self; + + #[inline] + fn bitand(self, rhs: Self) -> Self { + self.intersection(rhs) + } +} + +impl std::ops::BitAndAssign for PipelineStageAccessSet { + #[inline] + fn bitand_assign(&mut self, rhs: Self) { + *self = self.intersection(rhs); + } +} + +impl std::ops::BitOr for PipelineStageAccessSet { + type Output = Self; + + #[inline] + fn bitor(self, rhs: Self) -> Self { + self.union(rhs) + } +} + +impl std::ops::BitOrAssign for PipelineStageAccessSet { + #[inline] + fn bitor_assign(&mut self, rhs: Self) { + *self = self.union(rhs); + } +} + +impl std::ops::BitXor for PipelineStageAccessSet { + type Output = Self; + + #[inline] + fn bitxor(self, rhs: Self) -> Self { + self.symmetric_difference(rhs) + } +} + +impl std::ops::BitXorAssign for PipelineStageAccessSet { + #[inline] + fn bitxor_assign(&mut self, rhs: Self) { + *self = self.symmetric_difference(rhs); + } +} + +impl std::ops::Sub for PipelineStageAccessSet { + type Output = Self; + + #[inline] + fn sub(self, rhs: Self) -> Self { + self.difference(rhs) + } +} + +impl std::ops::SubAssign for 
PipelineStageAccessSet { +    #[inline] +    fn sub_assign(&mut self, rhs: Self) { +        *self = self.difference(rhs); +    } +} + +impl From<PipelineStageAccess> for PipelineStageAccessSet { +    #[inline] +    fn from(val: PipelineStageAccess) -> Self { +        debug_assert!(val != PipelineStageAccess::__MAX_VALUE__); // You did something very dumb... +        Self(1u128 << val as u8) +    } +} + +impl From<PipelineStages> for PipelineStageAccessSet { +    #[inline] +    fn from(stages: PipelineStages) -> Self { +        let mut result = Self::empty(); + +        if stages.intersects(PipelineStages::DRAW_INDIRECT) { +            result |= Self::from(PipelineStageAccess::DrawIndirect_IndirectCommandRead) +                | Self::from(PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead) +        } + +        if stages.intersects(PipelineStages::VERTEX_SHADER) { +            result |= Self::from(PipelineStageAccess::VertexShader_UniformRead) +                | Self::from(PipelineStageAccess::VertexShader_ShaderSampledRead) +                | Self::from(PipelineStageAccess::VertexShader_ShaderStorageRead) +                | Self::from(PipelineStageAccess::VertexShader_ShaderStorageWrite) +                | Self::from(PipelineStageAccess::VertexShader_AccelerationStructureRead) +        } + +        if stages.intersects(PipelineStages::TESSELLATION_CONTROL_SHADER) { +            result |= Self::from(PipelineStageAccess::TessellationControlShader_UniformRead) +                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderSampledRead) +                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageRead) +                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageWrite) +                | Self::from( +                    PipelineStageAccess::TessellationControlShader_AccelerationStructureRead, +                ) +        } + +        if stages.intersects(PipelineStages::TESSELLATION_EVALUATION_SHADER) { +            result |= Self::from(PipelineStageAccess::TessellationEvaluationShader_UniformRead) +                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead) +                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead) +                |
Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite) + | Self::from( + PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead, + ) + } + + if stages.intersects(PipelineStages::GEOMETRY_SHADER) { + result |= Self::from(PipelineStageAccess::GeometryShader_UniformRead) + | Self::from(PipelineStageAccess::GeometryShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::GeometryShader_AccelerationStructureRead) + } + + if stages.intersects(PipelineStages::FRAGMENT_SHADER) { + result |= Self::from(PipelineStageAccess::FragmentShader_UniformRead) + | Self::from(PipelineStageAccess::FragmentShader_InputAttachmentRead) + | Self::from(PipelineStageAccess::FragmentShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::FragmentShader_AccelerationStructureRead) + } + + if stages.intersects(PipelineStages::EARLY_FRAGMENT_TESTS) { + result |= Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead) + | Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite) + } + + if stages.intersects(PipelineStages::LATE_FRAGMENT_TESTS) { + result |= Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead) + | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite) + } + + if stages.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) { + result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead) + | Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite) + | Self::from( + PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent, + ) + } + + if stages.intersects(PipelineStages::COMPUTE_SHADER) { + result |= 
Self::from(PipelineStageAccess::ComputeShader_UniformRead) + | Self::from(PipelineStageAccess::ComputeShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::ComputeShader_AccelerationStructureRead) + } + + if stages.intersects(PipelineStages::HOST) { + result |= Self::from(PipelineStageAccess::Host_HostRead) + | Self::from(PipelineStageAccess::Host_HostWrite) + } + + if stages.intersects(PipelineStages::COPY) { + result |= Self::from(PipelineStageAccess::Copy_TransferRead) + | Self::from(PipelineStageAccess::Copy_TransferWrite) + } + + if stages.intersects(PipelineStages::RESOLVE) { + result |= Self::from(PipelineStageAccess::Resolve_TransferRead) + | Self::from(PipelineStageAccess::Resolve_TransferWrite) + } + + if stages.intersects(PipelineStages::BLIT) { + result |= Self::from(PipelineStageAccess::Blit_TransferRead) + | Self::from(PipelineStageAccess::Blit_TransferWrite) + } + + if stages.intersects(PipelineStages::CLEAR) { + result |= Self::from(PipelineStageAccess::Clear_TransferWrite) + } + + if stages.intersects(PipelineStages::INDEX_INPUT) { + result |= Self::from(PipelineStageAccess::IndexInput_IndexRead) + } + + if stages.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) { + result |= Self::from(PipelineStageAccess::VertexAttributeInput_VertexAttributeRead) + } + + if stages.intersects(PipelineStages::VIDEO_DECODE) { + result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeRead) + | Self::from(PipelineStageAccess::VideoDecode_VideoDecodeWrite) + } + + if stages.intersects(PipelineStages::VIDEO_ENCODE) { + result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeRead) + | Self::from(PipelineStageAccess::VideoEncode_VideoEncodeWrite) + } + + if stages.intersects(PipelineStages::TRANSFORM_FEEDBACK) { + result |= Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackWrite) + | 
Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead) + | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite) + } + + if stages.intersects(PipelineStages::CONDITIONAL_RENDERING) { + result |= Self::from(PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead) + } + + if stages.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) { + result |= + Self::from(PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_UniformRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferWrite) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead) + | Self::from( + PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead, + ) + | Self::from( + PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite, + ) + // | Self::from(PipelineStageAccess::AccelerationStructureBuild_MicromapRead) + } + + if stages.intersects(PipelineStages::RAY_TRACING_SHADER) { + result |= Self::from(PipelineStageAccess::RayTracingShader_UniformRead) + | Self::from(PipelineStageAccess::RayTracingShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::RayTracingShader_AccelerationStructureRead) + // | Self::from(PipelineStageAccess::RayTracingShader_ShaderBindingTableRead) + } + + if stages.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) { + result |= Self::from(PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead) + } + + if stages.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) { + result |= + 
PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead +                    .into() +        } + +        if stages.intersects(PipelineStages::COMMAND_PREPROCESS) { +            result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessRead) +                | Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite) +        } + +        if stages.intersects(PipelineStages::TASK_SHADER) { +            result |= Self::from(PipelineStageAccess::TaskShader_UniformRead) +                | Self::from(PipelineStageAccess::TaskShader_ShaderSampledRead) +                | Self::from(PipelineStageAccess::TaskShader_ShaderStorageRead) +                | Self::from(PipelineStageAccess::TaskShader_ShaderStorageWrite) +                | Self::from(PipelineStageAccess::TaskShader_AccelerationStructureRead) +        } + +        if stages.intersects(PipelineStages::MESH_SHADER) { +            result |= Self::from(PipelineStageAccess::MeshShader_UniformRead) +                | Self::from(PipelineStageAccess::MeshShader_ShaderSampledRead) +                | Self::from(PipelineStageAccess::MeshShader_ShaderStorageRead) +                | Self::from(PipelineStageAccess::MeshShader_ShaderStorageWrite) +                | Self::from(PipelineStageAccess::MeshShader_AccelerationStructureRead) +        } + +        if stages.intersects(PipelineStages::SUBPASS_SHADING) { +            result |= Self::from(PipelineStageAccess::SubpassShading_InputAttachmentRead) +        } + +        if stages.intersects(PipelineStages::INVOCATION_MASK) { +            result |= Self::from(PipelineStageAccess::InvocationMask_InvocationMaskRead) +        } + +        /* +        if stages.intersects(PipelineStages::OPTICAL_FLOW) { +            result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowRead) +                | Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowWrite) +        } + +        if stages.intersects(PipelineStages::MICROMAP_BUILD) { +            result |= Self::from(PipelineStageAccess::MicromapBuild_MicromapWrite) +                | Self::from(PipelineStageAccess::MicromapBuild_MicromapRead) +        } +        */ + +        result +    } +} + +impl From<AccessFlags> for PipelineStageAccessSet { +    #[inline] +    fn from(access: AccessFlags) -> Self { +        let mut result = Self::empty(); + +        if
access.intersects(AccessFlags::INDIRECT_COMMAND_READ) { + result |= Self::from(PipelineStageAccess::DrawIndirect_IndirectCommandRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead) + } + + if access.intersects(AccessFlags::INDEX_READ) { + result |= Self::from(PipelineStageAccess::IndexInput_IndexRead) + } + + if access.intersects(AccessFlags::VERTEX_ATTRIBUTE_READ) { + result |= Self::from(PipelineStageAccess::VertexAttributeInput_VertexAttributeRead) + } + + if access.intersects(AccessFlags::UNIFORM_READ) { + result |= Self::from(PipelineStageAccess::VertexShader_UniformRead) + | Self::from(PipelineStageAccess::TessellationControlShader_UniformRead) + | Self::from(PipelineStageAccess::TessellationEvaluationShader_UniformRead) + | Self::from(PipelineStageAccess::GeometryShader_UniformRead) + | Self::from(PipelineStageAccess::FragmentShader_UniformRead) + | Self::from(PipelineStageAccess::ComputeShader_UniformRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_UniformRead) + | Self::from(PipelineStageAccess::RayTracingShader_UniformRead) + | Self::from(PipelineStageAccess::TaskShader_UniformRead) + | Self::from(PipelineStageAccess::MeshShader_UniformRead) + } + + if access.intersects(AccessFlags::INPUT_ATTACHMENT_READ) { + result |= Self::from(PipelineStageAccess::FragmentShader_InputAttachmentRead) + | Self::from(PipelineStageAccess::SubpassShading_InputAttachmentRead) + } + + if access.intersects(AccessFlags::COLOR_ATTACHMENT_READ) { + result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead) + } + + if access.intersects(AccessFlags::COLOR_ATTACHMENT_WRITE) { + result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite) + } + + if access.intersects(AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ) { + result |= Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead) + | 
Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead) + } + + if access.intersects(AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE) { + result |= + Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite) + | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite) + } + + if access.intersects(AccessFlags::TRANSFER_READ) { + result |= Self::from(PipelineStageAccess::Copy_TransferRead) + | Self::from(PipelineStageAccess::Resolve_TransferRead) + | Self::from(PipelineStageAccess::Blit_TransferRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferRead) + } + + if access.intersects(AccessFlags::TRANSFER_WRITE) { + result |= Self::from(PipelineStageAccess::Copy_TransferWrite) + | Self::from(PipelineStageAccess::Resolve_TransferWrite) + | Self::from(PipelineStageAccess::Blit_TransferWrite) + | Self::from(PipelineStageAccess::Clear_TransferWrite) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferWrite) + } + + if access.intersects(AccessFlags::HOST_READ) { + result |= Self::from(PipelineStageAccess::Host_HostRead) + } + + if access.intersects(AccessFlags::HOST_WRITE) { + result |= Self::from(PipelineStageAccess::Host_HostWrite) + } + + if access.intersects(AccessFlags::SHADER_SAMPLED_READ) { + result |= Self::from(PipelineStageAccess::VertexShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::TessellationControlShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::GeometryShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::FragmentShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::ComputeShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead) + | Self::from(PipelineStageAccess::RayTracingShader_ShaderSampledRead) + | Self::from(PipelineStageAccess::TaskShader_ShaderSampledRead) + | 
Self::from(PipelineStageAccess::MeshShader_ShaderSampledRead) + } + + if access.intersects(AccessFlags::SHADER_STORAGE_READ) { + result |= Self::from(PipelineStageAccess::VertexShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead) + | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::TaskShader_ShaderStorageRead) + | Self::from(PipelineStageAccess::MeshShader_ShaderStorageRead) + } + + if access.intersects(AccessFlags::SHADER_STORAGE_WRITE) { + result |= Self::from(PipelineStageAccess::VertexShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::TaskShader_ShaderStorageWrite) + | Self::from(PipelineStageAccess::MeshShader_ShaderStorageWrite) + } + + if access.intersects(AccessFlags::VIDEO_DECODE_READ) { + result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeRead) + } + + if access.intersects(AccessFlags::VIDEO_DECODE_WRITE) { + result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeWrite) + } + + if access.intersects(AccessFlags::VIDEO_ENCODE_READ) { + result |= 
Self::from(PipelineStageAccess::VideoEncode_VideoEncodeRead) + } + + if access.intersects(AccessFlags::VIDEO_ENCODE_WRITE) { + result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeWrite) + } + + if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_WRITE) { + result |= Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackWrite) + } + + if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ) { + result |= Self::from(PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead) + | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead) + } + + if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE) { + result |= + Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite) + } + + if access.intersects(AccessFlags::CONDITIONAL_RENDERING_READ) { + result |= Self::from(PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead) + } + + if access.intersects(AccessFlags::COMMAND_PREPROCESS_READ) { + result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessRead) + } + + if access.intersects(AccessFlags::COMMAND_PREPROCESS_WRITE) { + result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite) + } + + if access.intersects(AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ) { + result |= + Self::from(PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead) + } + + if access.intersects(AccessFlags::ACCELERATION_STRUCTURE_READ) { + result |= Self::from(PipelineStageAccess::VertexShader_AccelerationStructureRead) + | Self::from( + PipelineStageAccess::TessellationControlShader_AccelerationStructureRead, + ) + | Self::from( + PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead, + ) + | Self::from(PipelineStageAccess::GeometryShader_AccelerationStructureRead) + | Self::from(PipelineStageAccess::FragmentShader_AccelerationStructureRead) + | 
Self::from(PipelineStageAccess::ComputeShader_AccelerationStructureRead) + | Self::from( + PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead, + ) + | Self::from(PipelineStageAccess::RayTracingShader_AccelerationStructureRead) + | Self::from(PipelineStageAccess::TaskShader_AccelerationStructureRead) + | Self::from(PipelineStageAccess::MeshShader_AccelerationStructureRead) + } + + if access.intersects(AccessFlags::ACCELERATION_STRUCTURE_WRITE) { + result |= Self::from( + PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite, + ) + } + + if access.intersects(AccessFlags::FRAGMENT_DENSITY_MAP_READ) { + result |= Self::from(PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead) + } + + if access.intersects(AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT) { + result |= Self::from( + PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent, + ) + } + + if access.intersects(AccessFlags::INVOCATION_MASK_READ) { + result |= Self::from(PipelineStageAccess::InvocationMask_InvocationMaskRead) + } + + /* + if access.intersects(AccessFlags::SHADER_BINDING_TABLE_READ) { + result |= Self::from(PipelineStageAccess::RayTracingShader_ShaderBindingTableRead) + } + + if access.intersects(AccessFlags::MICROMAP_READ) { + result |= Self::from(PipelineStageAccess::AccelerationStructureBuild_MicromapRead) + | Self::from(PipelineStageAccess::MicromapBuild_MicromapRead) + } + + if access.intersects(AccessFlags::MICROMAP_WRITE) { + result |= Self::from(PipelineStageAccess::MicromapBuild_MicromapWrite) + } + + if access.intersects(AccessFlags::OPTICAL_FLOW_READ) { + result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowRead) + } + + if access.intersects(AccessFlags::OPTICAL_FLOW_WRITE) { + result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowWrite) + } + */ + + result + } +} + /// Dependency info for barriers in a pipeline barrier or event command. 
/// /// A pipeline barrier creates a dependency between commands submitted before the barrier (the