Task graph [2/10]: the task graph data structure (#2545)
This commit is contained in:
parent 48566ae108
commit e6e4bc6a26

Cargo.lock (generated): 3 changed lines
@@ -2339,7 +2339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "00bbb9a832cd697a36c2abd5ef58c263b0bc33cdf280f704b895646ed3e9f595"
 dependencies = [
  "libc",
- "windows-targets 0.52.0",
+ "windows-targets 0.52.5",
 ]
 
 [[package]]
@@ -2363,6 +2363,7 @@ dependencies = [
 "half",
 "heck",
 "indexmap",
+ "libc",
 "libloading 0.8.3",
 "nom",
 "objc",
vulkano-taskgraph/src/graph/mod.rs: 1257 added lines (new file)
File diff suppressed because it is too large

vulkano-taskgraph/src/lib.rs
@@ -3,7 +3,10 @@
 #![forbid(unsafe_op_in_unsafe_fn)]
 
 use concurrent_slotmap::SlotId;
-use resource::{BufferRange, BufferState, DeathRow, ImageState, Resources, SwapchainState};
+use graph::ResourceAccesses;
+use resource::{
+    AccessType, BufferRange, BufferState, DeathRow, ImageState, Resources, SwapchainState,
+};
 use std::{
     any::{Any, TypeId},
     cell::Cell,
@@ -27,6 +30,7 @@ use vulkano::{
     DeviceSize, ValidationError, VulkanError,
 };
 
+pub mod graph;
 pub mod resource;
 
 /// A task represents a unit of work to be recorded to a command buffer.
@@ -117,6 +121,7 @@ pub struct TaskContext<'a> {
     death_row: Cell<Option<&'a mut DeathRow>>,
     current_command_buffer: Cell<Option<&'a mut RawRecordingCommandBuffer>>,
     command_buffers: Cell<Option<&'a mut Vec<RawCommandBuffer>>>,
+    accesses: &'a ResourceAccesses,
 }
 
 impl<'a> TaskContext<'a> {
@@ -246,6 +251,7 @@
         #[cold]
         unsafe fn invalidate_subbuffer(
             tcx: &TaskContext<'_>,
+            id: Id<Buffer>,
             subbuffer: &Subbuffer<[u8]>,
             allocation: &ResourceMemory,
             atom_size: DeviceAlignment,
@@ -259,7 +265,7 @@
             );
             let range = Range { start, end };
 
-            tcx.validate_read_buffer(subbuffer.buffer(), range.clone())?;
+            tcx.validate_read_buffer(id, range.clone())?;
 
             let memory_range = MappedMemoryRange {
                 offset: range.start,
@@ -310,10 +316,10 @@
             // SAFETY:
             // `subbuffer.mapped_slice()` didn't return an error, which means that the subbuffer
             // falls within the mapped range of the memory.
-            unsafe { invalidate_subbuffer(self, subbuffer.as_bytes(), allocation, atom_size) }?;
+            unsafe { invalidate_subbuffer(self, id, subbuffer.as_bytes(), allocation, atom_size) }?;
         } else {
             let range = subbuffer.offset()..subbuffer.offset() + subbuffer.size();
-            self.validate_write_buffer(buffer, range)?;
+            self.validate_write_buffer(id, range)?;
         }
 
         // SAFETY: We checked that the task has read access to the subbuffer above, which also
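For context on the `atom_size` parameter threaded through `invalidate_subbuffer`: on non-coherent host-visible memory, mapped-memory ranges handed to invalidate/flush must be aligned to the device's non-coherent atom size, so the subbuffer's byte range is widened first. A minimal stand-alone sketch of that widening step (the helper and names are invented for illustration and are not part of vulkano-taskgraph):

// Round a byte range outward to the enclosing atom-aligned range, as required
// by Vulkan for vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges on
// non-coherent memory.
fn align_to_atom(start: u64, end: u64, atom_size: u64) -> (u64, u64) {
    // Round the start down and the end up to the nearest atom boundary.
    let aligned_start = start - start % atom_size;
    let aligned_end = end.div_ceil(atom_size) * atom_size;
    (aligned_start, aligned_end)
}

fn main() {
    // With a 64-byte atom size, the range 100..200 widens to 64..256.
    assert_eq!(align_to_atom(100, 200, 64), (64, 256));
}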
@@ -325,8 +331,25 @@
         Ok(BufferReadGuard { data })
     }
 
-    fn validate_read_buffer(&self, _buffer: &Buffer, _range: BufferRange) -> TaskResult {
-        todo!()
+    fn validate_read_buffer(
+        &self,
+        id: Id<Buffer>,
+        range: BufferRange,
+    ) -> Result<(), Box<ValidationError>> {
+        if !self
+            .accesses
+            .contains_buffer_access(id, range, AccessType::HostRead)
+        {
+            return Err(Box::new(ValidationError {
+                context: "TaskContext::read_buffer".into(),
+                problem: "the task node does not have an access of type `AccessType::HostRead` \
+                    for the range of the buffer"
+                    .into(),
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
     }
 
     /// Gets read access to a portion of the buffer corresponding to `id` without checking if this
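The new `validate_read_buffer` replaces the old `todo!()` with a lookup into the task node's declared accesses. A simplified stand-alone model of such a check (the types, fields, and containment logic here are assumptions for illustration, not the actual `ResourceAccesses::contains_buffer_access` implementation):

use std::ops::Range;

#[derive(Clone, Copy, PartialEq, Eq)]
enum Access {
    HostRead,
    HostWrite,
}

// One declared access of a task node: a buffer, a byte range, and an access type.
struct DeclaredAccess {
    buffer: u32,       // stand-in for `Id<Buffer>`
    range: Range<u64>, // stand-in for `BufferRange`
    access: Access,
}

// A host read/write of `range` is valid only if some declared access of the same
// type on the same buffer covers the whole requested range.
fn contains_buffer_access(declared: &[DeclaredAccess], buffer: u32, range: Range<u64>, access: Access) -> bool {
    declared.iter().any(|a| {
        a.buffer == buffer
            && a.access == access
            && a.range.start <= range.start
            && range.end <= a.range.end
    })
}

fn main() {
    let declared = [DeclaredAccess { buffer: 7, range: 0..256, access: Access::HostRead }];
    assert!(contains_buffer_access(&declared, 7, 64..128, Access::HostRead));
    assert!(!contains_buffer_access(&declared, 7, 64..128, Access::HostWrite));
}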
@@ -429,6 +452,7 @@
         #[cold]
         unsafe fn invalidate_subbuffer(
             tcx: &TaskContext<'_>,
+            id: Id<Buffer>,
             subbuffer: &Subbuffer<[u8]>,
             allocation: &ResourceMemory,
             atom_size: DeviceAlignment,
@@ -442,7 +466,7 @@
             );
             let range = Range { start, end };
 
-            tcx.validate_write_buffer(subbuffer.buffer(), range.clone())?;
+            tcx.validate_write_buffer(id, range.clone())?;
 
             let memory_range = MappedMemoryRange {
                 offset: range.start,
@@ -493,10 +517,10 @@
             // SAFETY:
             // `subbuffer.mapped_slice()` didn't return an error, which means that the subbuffer
             // falls within the mapped range of the memory.
-            unsafe { invalidate_subbuffer(self, subbuffer.as_bytes(), allocation, atom_size) }?;
+            unsafe { invalidate_subbuffer(self, id, subbuffer.as_bytes(), allocation, atom_size) }?;
         } else {
             let range = subbuffer.offset()..subbuffer.offset() + subbuffer.size();
-            self.validate_write_buffer(buffer, range)?;
+            self.validate_write_buffer(id, range)?;
         }
 
         // SAFETY: We checked that the task has write access to the subbuffer above, which also
@@ -512,8 +536,25 @@
         })
     }
 
-    fn validate_write_buffer(&self, _buffer: &Buffer, _range: BufferRange) -> TaskResult {
-        todo!()
+    fn validate_write_buffer(
+        &self,
+        id: Id<Buffer>,
+        range: BufferRange,
+    ) -> Result<(), Box<ValidationError>> {
+        if !self
+            .accesses
+            .contains_buffer_access(id, range, AccessType::HostWrite)
+        {
+            return Err(Box::new(ValidationError {
+                context: "TaskContext::write_buffer".into(),
+                problem: "the task node does not have an access of type `AccessType::HostWrite` \
+                    for the range of the buffer"
+                    .into(),
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
     }
 
     /// Gets write access to a portion of the buffer corresponding to `id` without checking if this
@@ -768,7 +809,7 @@ impl InvalidSlotError {
 impl fmt::Display for InvalidSlotError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let &InvalidSlotError { slot } = self;
-        let object_type = match slot.tag() {
+        let object_type = match slot.tag() & OBJECT_TYPE_MASK {
             0 => ObjectType::Buffer,
             1 => ObjectType::Image,
             2 => ObjectType::Swapchain,
@@ -860,6 +901,10 @@ impl<T> Id<T> {
             marker: PhantomData,
         }
     }
+
+    fn index(self) -> u32 {
+        self.slot.index()
+    }
 }
 
 impl<T> Clone for Id<T> {
@@ -874,8 +919,8 @@ impl<T> Copy for Id<T> {}
 impl<T> fmt::Debug for Id<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Id")
-            .field("generation", &self.slot.generation())
             .field("index", &self.slot.index())
+            .field("generation", &self.slot.generation())
             .finish()
     }
 }
@@ -940,6 +985,13 @@ enum ObjectType {
     Flight = 3,
 }
 
+const BUFFER_TAG: u32 = ObjectType::Buffer as u32;
+const IMAGE_TAG: u32 = ObjectType::Image as u32;
+const SWAPCHAIN_TAG: u32 = ObjectType::Swapchain as u32;
+const FLIGHT_TAG: u32 = ObjectType::Flight as u32;
+
+const OBJECT_TYPE_MASK: u32 = 0b11;
+
 // SAFETY: ZSTs can always be safely produced out of thin air, barring any safety invariants they
 // might impose, which in the case of `NonExhaustive` are none.
 const NE: vulkano::NonExhaustive =
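These constants centralize the slot-tag encoding in lib.rs (the resource.rs hunks below remove the old copies), and `OBJECT_TYPE_MASK` is what the updated `Display` impl for `InvalidSlotError` masks with. A minimal sketch of the scheme they imply, keeping the object type in the low two bits of a slot tag; the stand-alone `decode_object_type` helper is invented for illustration:

const OBJECT_TYPE_MASK: u32 = 0b11;

#[derive(Debug, PartialEq, Eq)]
enum ObjectType {
    Buffer = 0,
    Image = 1,
    Swapchain = 2,
    Flight = 3,
}

// Only the low two bits of the tag carry the object type; any higher bits are
// free for other uses and are ignored by the mask.
fn decode_object_type(tag: u32) -> ObjectType {
    match tag & OBJECT_TYPE_MASK {
        0 => ObjectType::Buffer,
        1 => ObjectType::Image,
        2 => ObjectType::Swapchain,
        3 => ObjectType::Flight,
        _ => unreachable!(),
    }
}

fn main() {
    // Higher bits may carry other information; masking recovers the type.
    assert_eq!(decode_object_type(0b1100 | 2), ObjectType::Swapchain);
}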
vulkano-taskgraph/src/resource.rs

@@ -1,6 +1,6 @@
 //! Synchronization state tracking of all resources.
 
-use crate::{Id, InvalidSlotError, ObjectType, Ref};
+use crate::{Id, InvalidSlotError, Ref, BUFFER_TAG, FLIGHT_TAG, IMAGE_TAG, SWAPCHAIN_TAG};
 use ash::vk;
 use concurrent_slotmap::{epoch, SlotMap};
 use parking_lot::{Mutex, MutexGuard};
@@ -38,11 +38,6 @@ use vulkano::{
 
 static REGISTERED_DEVICES: Mutex<Vec<usize>> = Mutex::new(Vec::new());
 
-const BUFFER_TAG: u32 = ObjectType::Buffer as u32;
-const IMAGE_TAG: u32 = ObjectType::Image as u32;
-const SWAPCHAIN_TAG: u32 = ObjectType::Swapchain as u32;
-const FLIGHT_TAG: u32 = ObjectType::Flight as u32;
-
 /// Tracks the synchronization state of all resources.
 ///
 /// There can only exist one `Resources` collection per device, because there must only be one
@@ -117,7 +112,6 @@ pub struct Flight {
 
 #[derive(Debug)]
 pub(crate) struct FlightState {
-    pub(crate) swapchains: SmallVec<[Id<Swapchain>; 1]>,
     pub(crate) death_rows: SmallVec<[DeathRow; 3]>,
 }
 
@@ -214,6 +208,7 @@ impl Resources {
     /// # Panics
     ///
     /// - Panics if the instance of `surface` is not the same as that of `self.device()`.
+    /// - Panics if `flight_id` is invalid.
     /// - Panics if `create_info.min_image_count` is not greater than or equal to the number of
     ///   [frames] of the flight corresponding to `flight_id`.
     ///
@@ -276,7 +271,6 @@
             current_frame: AtomicU32::new(0),
             fences,
             state: Mutex::new(FlightState {
-                swapchains: SmallVec::new(),
                 death_rows: (0..frame_count.get()).map(|_| Vec::new()).collect(),
             }),
         };
@@ -462,30 +456,11 @@
             last_accesses: Mutex::new(RangeMap::new()),
         };
 
-        unsafe {
-            state.set_access(
-                ImageSubresourceRange {
-                    aspects: ImageAspects::COLOR,
-                    mip_levels: 0..1,
-                    array_layers: 0..state.swapchain.image_array_layers(),
-                },
-                ImageAccess::NONE,
-            );
-        }
+        unsafe { state.set_access(0..state.swapchain.image_array_layers(), ImageAccess::NONE) };
 
         let slot = self.swapchains.insert_with_tag(state, SWAPCHAIN_TAG, guard);
-        let id = Id::new(slot);
 
-        self.flights
-            .get(flight_id.slot, guard)
-            .unwrap()
-            .state
-            // FIXME:
-            .lock()
-            .swapchains
-            .push(id);
-
-        Ok(id)
+        Ok(Id::new(slot))
     }
 
     /// Removes the buffer corresponding to `id`.
@@ -524,20 +499,10 @@
     /// pending command buffer, and if it is used in any command buffer that's in the executable
     /// or recording state, that command buffer must never be executed.
     pub unsafe fn remove_swapchain(&self, id: Id<Swapchain>) -> Result<Ref<'_, SwapchainState>> {
-        let state = self
-            .swapchains
+        self.swapchains
             .remove(id.slot, self.pin())
             .map(Ref)
-            .ok_or(InvalidSlotError::new(id))?;
-        let flight_id = state.flight_id;
-
-        let flight = self.flights.get(flight_id.slot, self.pin()).unwrap();
-        // FIXME:
-        let swapchains = &mut flight.state.lock().swapchains;
-        let index = swapchains.iter().position(|&x| x == id).unwrap();
-        swapchains.remove(index);
-
-        Ok(state)
+            .ok_or(InvalidSlotError::new(id))
     }
 
     /// Returns the buffer corresponding to `id`.
@@ -659,7 +624,7 @@ impl BufferState {
         assert!(!range.is_empty());
 
         BufferAccesses {
-            inner: MutexGuard::leak(self.last_accesses.lock()).overlapping(range),
+            overlapping: MutexGuard::leak(self.last_accesses.lock()).overlapping(range),
             // SAFETY: We locked the mutex above.
             _guard: unsafe { AccessesGuard::new(&self.last_accesses) },
         }
@@ -730,14 +695,14 @@ impl ImageState {
     #[inline]
     pub fn accesses(&self, subresource_range: ImageSubresourceRange) -> ImageAccesses<'_> {
         let subresource_ranges = SubresourceRanges::from_image(&self.image, subresource_range);
-        let map = MutexGuard::leak(self.last_accesses.lock());
+        let last_accesses = MutexGuard::leak(self.last_accesses.lock());
 
         ImageAccesses {
             mip_levels: self.image.mip_levels(),
             array_layers: self.image.array_layers(),
             subresource_ranges,
-            overlapping: map.overlapping(0..0),
-            map,
+            overlapping: last_accesses.overlapping(0..0),
+            last_accesses,
             // SAFETY: We locked the mutex above.
             _guard: unsafe { AccessesGuard::new(&self.last_accesses) },
         }
@@ -862,31 +827,33 @@ impl SwapchainState {
         &self.images[self.current_image_index.load(Ordering::Relaxed) as usize]
     }
 
-    pub(crate) fn accesses(&self, subresource_range: ImageSubresourceRange) -> ImageAccesses<'_> {
-        assert_eq!(subresource_range.aspects, ImageAspects::COLOR);
-
+    pub(crate) fn accesses(&self, array_layers: Range<u32>) -> ImageAccesses<'_> {
+        let subresource_range = ImageSubresourceRange {
+            aspects: ImageAspects::COLOR,
+            mip_levels: 0..1,
+            array_layers,
+        };
         let subresource_ranges =
             SubresourceRanges::new(subresource_range, 1, self.swapchain.image_array_layers());
-        let map = MutexGuard::leak(self.last_accesses.lock());
+        let last_accesses = MutexGuard::leak(self.last_accesses.lock());
 
         ImageAccesses {
             mip_levels: 1,
             array_layers: self.swapchain.image_array_layers(),
             subresource_ranges,
-            overlapping: map.overlapping(0..0),
-            map,
+            overlapping: last_accesses.overlapping(0..0),
+            last_accesses,
             // SAFETY: We locked the mutex above.
             _guard: unsafe { AccessesGuard::new(&self.last_accesses) },
         }
     }
 
-    pub(crate) unsafe fn set_access(
-        &self,
-        subresource_range: ImageSubresourceRange,
-        access: ImageAccess,
-    ) {
-        assert_eq!(subresource_range.aspects, ImageAspects::COLOR);
-
+    pub(crate) unsafe fn set_access(&self, array_layers: Range<u32>, access: ImageAccess) {
+        let subresource_range = ImageSubresourceRange {
+            aspects: ImageAspects::COLOR,
+            mip_levels: 0..1,
+            array_layers,
+        };
         let mut last_accesses = self.last_accesses.lock();
 
         for range in
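The `SwapchainState` methods above now take only an array-layer range because a swapchain image always has exactly one mip level and only the color aspect; the full subresource range is rebuilt internally. A short sketch of that reconstruction using vulkano's public image types (the free function and usage are invented for illustration):

use std::ops::Range;

use vulkano::image::{ImageAspects, ImageSubresourceRange};

// Rebuild the implied subresource range of a swapchain image from only the
// array-layer range, as the updated methods do internally.
fn swapchain_subresource_range(array_layers: Range<u32>) -> ImageSubresourceRange {
    ImageSubresourceRange {
        aspects: ImageAspects::COLOR,
        mip_levels: 0..1,
        array_layers,
    }
}

fn main() {
    let range = swapchain_subresource_range(0..1);
    assert_eq!(range.mip_levels, 0..1);
    assert_eq!(range.array_layers, 0..1);
}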
@@ -964,7 +931,7 @@ pub type BufferRange = Range<DeviceSize>;
 ///
 /// [`accesses`]: BufferState::accesses
 pub struct BufferAccesses<'a> {
-    inner: rangemap::map::Overlapping<'a, DeviceSize, BufferAccess, Range<DeviceSize>>,
+    overlapping: rangemap::map::Overlapping<'a, DeviceSize, BufferAccess, Range<DeviceSize>>,
     _guard: AccessesGuard<'a, BufferAccess>,
 }
 
@@ -973,7 +940,7 @@ impl<'a> Iterator for BufferAccesses<'a> {
 
     #[inline]
     fn next(&mut self) -> Option<Self::Item> {
-        self.inner
+        self.overlapping
             .next()
             .map(|(range, access)| (range.clone(), access))
     }
@@ -991,7 +958,7 @@ pub struct ImageAccesses<'a> {
     array_layers: u32,
     subresource_ranges: SubresourceRanges,
     overlapping: rangemap::map::Overlapping<'a, DeviceSize, ImageAccess, Range<DeviceSize>>,
-    map: &'a RangeMap<DeviceSize, ImageAccess>,
+    last_accesses: &'a RangeMap<DeviceSize, ImageAccess>,
     _guard: AccessesGuard<'a, ImageAccess>,
 }
 
@@ -1000,17 +967,17 @@ impl<'a> Iterator for ImageAccesses<'a> {
 
     #[inline]
     fn next(&mut self) -> Option<Self::Item> {
+        loop {
             if let Some((range, access)) = self.overlapping.next() {
                 let subresource_range =
                     range_to_subresources(range.clone(), self.mip_levels, self.array_layers);
 
-                Some((subresource_range, access))
+                break Some((subresource_range, access));
             } else if let Some(range) = self.subresource_ranges.next() {
-                self.overlapping = self.map.overlapping(range);
-
-                self.next()
+                self.overlapping = self.last_accesses.overlapping(range);
             } else {
-                None
+                break None;
             }
+        }
     }
 }
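The `Iterator` impl above now drives the outer `subresource_ranges` iterator with a `loop` and break-with-value rather than recursing into `self.next()`. A generic stand-alone illustration of the same refill-the-inner-iterator pattern (the `Flattened` type is invented for this example; the real code refills `overlapping` from `last_accesses`):

// An iterator over the items of a sequence of sub-iterators. When the current
// inner iterator runs dry, the loop pulls the next one from the outer iterator
// and continues instead of calling `self.next()` recursively.
struct Flattened<O, I>
where
    O: Iterator,
    O::Item: IntoIterator<IntoIter = I>,
    I: Iterator,
{
    outer: O,
    inner: I,
}

impl<O, I> Iterator for Flattened<O, I>
where
    O: Iterator,
    O::Item: IntoIterator<IntoIter = I>,
    I: Iterator,
{
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            if let Some(item) = self.inner.next() {
                break Some(item);
            } else if let Some(next_inner) = self.outer.next() {
                self.inner = next_inner.into_iter();
            } else {
                break None;
            }
        }
    }
}

fn main() {
    // Mirrors `overlapping: last_accesses.overlapping(0..0)`: start with an
    // empty inner iterator and let `next` refill it from the outer one.
    let iter = Flattened {
        outer: vec![0..2u32, 5..7].into_iter(),
        inner: 0..0,
    };

    assert_eq!(iter.collect::<Vec<_>>(), [0u32, 1, 5, 6]);
}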
@@ -1080,6 +1047,7 @@ impl SubresourceRanges {
     ) -> Self {
         assert!(subresource_range.mip_levels.end <= image_mip_levels);
         assert!(subresource_range.array_layers.end <= image_array_layers);
+        assert!(!subresource_range.aspects.is_empty());
         assert!(!subresource_range.mip_levels.is_empty());
         assert!(!subresource_range.array_layers.is_empty());
 
@@ -1839,6 +1807,24 @@ access_types! {
     }
 }
 
+impl AccessType {
+    pub(crate) const fn is_valid_buffer_access_type(self) -> bool {
+        // Let's reuse the image layout lookup table, since it already exists.
+        let image_layout = self.image_layout();
+
+        matches!(image_layout, ImageLayout::Undefined) && !matches!(self, AccessType::None)
+    }
+
+    pub(crate) const fn is_valid_image_access_type(self) -> bool {
+        let image_layout = self.image_layout();
+
+        !matches!(
+            image_layout,
+            ImageLayout::Undefined | ImageLayout::PresentSrc,
+        )
+    }
+}
+
 /// Specifies which type of layout an image subresource is accessed in.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 #[non_exhaustive]
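The two predicates above piggyback on the image layout table: an access type is usable with buffers exactly when it has no associated image layout (and is not `None`), and usable with images when its layout is neither `Undefined` nor `PresentSrc`. A compilable toy model of that rule, with an invented four-variant `AccessType` standing in for the much larger macro-generated enum:

#[derive(Clone, Copy)]
enum ImageLayout {
    Undefined,
    ColorAttachmentOptimal,
    PresentSrc,
}

#[derive(Clone, Copy)]
enum AccessType {
    None,
    HostRead,
    ColorAttachmentWrite,
    Present,
}

impl AccessType {
    // Accesses that never touch an image map to `Undefined` in the layout table.
    const fn image_layout(self) -> ImageLayout {
        match self {
            AccessType::None | AccessType::HostRead => ImageLayout::Undefined,
            AccessType::ColorAttachmentWrite => ImageLayout::ColorAttachmentOptimal,
            AccessType::Present => ImageLayout::PresentSrc,
        }
    }

    // Valid for buffers: layout-less (`Undefined`) and not the empty `None` access.
    const fn is_valid_buffer_access_type(self) -> bool {
        matches!(self.image_layout(), ImageLayout::Undefined) && !matches!(self, AccessType::None)
    }

    // Valid for images: a real layout, i.e. neither `Undefined` nor `PresentSrc`.
    const fn is_valid_image_access_type(self) -> bool {
        !matches!(self.image_layout(), ImageLayout::Undefined | ImageLayout::PresentSrc)
    }
}

fn main() {
    assert!(AccessType::HostRead.is_valid_buffer_access_type());
    assert!(!AccessType::HostRead.is_valid_image_access_type());
    assert!(AccessType::ColorAttachmentWrite.is_valid_image_access_type());
    assert!(!AccessType::None.is_valid_buffer_access_type());
    assert!(!AccessType::Present.is_valid_image_access_type());
}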
vulkano's `Sharing` enum (sync module)

@@ -74,6 +74,23 @@
     Concurrent(I),
 }
 
+impl<I> Sharing<I>
+where
+    I: IntoIterator<Item = u32>,
+{
+    /// Returns `true` if `self` is the `Exclusive` variant.
+    #[inline]
+    pub fn is_exclusive(&self) -> bool {
+        matches!(self, Self::Exclusive)
+    }
+
+    /// Returns `true` if `self` is the `Concurrent` variant.
+    #[inline]
+    pub fn is_concurrent(&self) -> bool {
+        matches!(self, Self::Concurrent(..))
+    }
+}
+
 /// How the memory of a resource is currently being accessed.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub(crate) enum CurrentAccess {
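A brief usage sketch for the two new helpers; the `vulkano::sync::Sharing` import path and the `Vec<u32>` type parameter are assumptions here, while the methods themselves are taken from the hunk above:

use vulkano::sync::Sharing;

// Summarize a sharing mode using the new predicate methods.
fn describe(sharing: &Sharing<Vec<u32>>) -> &'static str {
    if sharing.is_exclusive() {
        "exclusive to one queue family"
    } else {
        "shared concurrently between queue families"
    }
}

fn main() {
    assert_eq!(describe(&Sharing::Exclusive), "exclusive to one queue family");
    assert_eq!(
        describe(&Sharing::Concurrent(vec![0, 1])),
        "shared concurrently between queue families"
    );
}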