Rework command buffers

This commit is contained in:
Pierre Krieger 2017-06-07 19:47:35 +02:00
parent 934169ddfb
commit d372724054
64 changed files with 2803 additions and 2743 deletions

View File

@ -21,7 +21,6 @@ extern crate vulkano_shader_derive;
extern crate vulkano_win;
use vulkano_win::VkSurfaceBuild;
use vulkano::command_buffer::CommandBufferBuilder;
use vulkano::sync::GpuFuture;
use std::sync::Arc;

View File

@ -22,7 +22,6 @@ extern crate vulkano_shader_derive;
extern crate vulkano_win;
use vulkano_win::VkSurfaceBuild;
use vulkano::command_buffer::CommandBufferBuilder;
use vulkano::sync::GpuFuture;
use std::sync::Arc;

View File

@ -40,7 +40,6 @@ use vulkano_win::VkSurfaceBuild;
use vulkano::buffer::BufferUsage;
use vulkano::buffer::CpuAccessibleBuffer;
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::command_buffer::CommandBufferBuilder;
use vulkano::command_buffer::DynamicState;
use vulkano::device::Device;
use vulkano::framebuffer::Framebuffer;

View File

@ -38,13 +38,9 @@ use buffer::traits::BufferInner;
use buffer::traits::Buffer;
use buffer::traits::TypedBuffer;
use buffer::traits::TypedBufferAccess;
use command_buffer::cb::AddCommand;
use command_buffer::commands_raw::CmdCopyBuffer;
use command_buffer::commands_raw::CmdCopyBufferError;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::AutoCommandBuffer;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferBuilder;
use command_buffer::CommandBufferBuilderError;
use command_buffer::CommandBufferExecFuture;
use device::Device;
use device::DeviceOwned;
@ -81,7 +77,7 @@ pub struct ImmutableBuffer<T: ?Sized, A = StdMemoryPoolAlloc> {
}
// TODO: make this prettier
type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, ::command_buffer::cb::SubmitSyncLayer<::command_buffer::cb::AbstractStorageLayer<::command_buffer::cb::UnsafeCommandBuffer<Arc<::command_buffer::pool::standard::StandardCommandPool>>>>>;
type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, AutoCommandBuffer>;
impl<T: ?Sized> ImmutableBuffer<T> {
/// Builds an `ImmutableBuffer` from some data.
@ -116,38 +112,6 @@ impl<T: ?Sized> ImmutableBuffer<T> {
B::Access: 'static + Clone + Send + Sync,
I: IntoIterator<Item = QueueFamily<'a>>,
T: 'static + Send + Sync,
{
let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?;
let (buf, cb) = match ImmutableBuffer::from_buffer_with_builder(source, usage,
queue_families, cb)
{
Ok(v) => v,
Err(ImmutableBufferFromBufferWithBuilderError::OomError(err)) => return Err(err),
Err(ImmutableBufferFromBufferWithBuilderError::CommandBufferBuilderError(_)) => {
// Example errors that can trigger this: forbidden while inside render pass,
// ranges overlapping between buffers, missing usage in one of the buffers, etc.
// None of them can actually happen.
unreachable!()
},
};
let future = match cb.build()?.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!()
};
Ok((buf, future))
}
/// Builds an `ImmutableBuffer` that copies its data from another buffer.
pub fn from_buffer_with_builder<'a, B, I, Cb, O>(source: B, usage: BufferUsage, queue_families: I,
builder: Cb)
-> Result<(Arc<ImmutableBuffer<T>>, O), ImmutableBufferFromBufferWithBuilderError>
where B: Buffer + TypedBuffer<Content = T> + DeviceOwned, // TODO: remove + DeviceOwned once Buffer requires it
I: IntoIterator<Item = QueueFamily<'a>>,
Cb: CommandBufferBuilder +
AddCommand<CmdCopyBuffer<B::Access, ImmutableBufferInitialization<T>>, Out = O>,
{
unsafe {
// We automatically set `transfer_dest` to true in order to avoid annoying errors.
@ -157,10 +121,18 @@ impl<T: ?Sized> ImmutableBuffer<T> {
};
let (buffer, init) = ImmutableBuffer::raw(source.device().clone(), source.size(),
actual_usage, queue_families)?;
actual_usage, queue_families)?;
let builder = builder.copy_buffer(source, init)?;
Ok((buffer, builder))
let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?
.copy_buffer(source, init).unwrap() // TODO: return error?
.build()?;
let future = match cb.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!()
};
Ok((buffer, future))
}
}
}
@ -465,7 +437,8 @@ impl<T: ?Sized, A> Drop for ImmutableBufferInitialization<T, A> {
}
}
/// Error that can happen when creating a `CmdCopyBuffer`.
// TODO:
/*/// Error that can happen when creating a `CmdCopyBuffer`.
#[derive(Debug, Copy, Clone)]
pub enum ImmutableBufferFromBufferWithBuilderError {
/// Out of memory.
@ -515,7 +488,7 @@ impl From<CommandBufferBuilderError<CmdCopyBufferError>> for ImmutableBufferFrom
fn from(err: CommandBufferBuilderError<CmdCopyBufferError>) -> ImmutableBufferFromBufferWithBuilderError {
ImmutableBufferFromBufferWithBuilderError::CommandBufferBuilderError(err)
}
}
}*/
#[cfg(test)]
mod tests {

View File

@ -7,34 +7,57 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::fmt;
use std::iter;
use std::mem;
use std::sync::Arc;
use buffer::Buffer;
use buffer::BufferAccess;
use command_buffer::cb;
use command_buffer::commands_raw;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::cb::UnsafeCommandBuffer;
use command_buffer::CommandAddError;
use buffer::TypedBufferAccess;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferBuilder;
use command_buffer::CommandBufferExecError;
use command_buffer::DynamicState;
use command_buffer::StateCacher;
use command_buffer::StateCacherOutcome;
use command_buffer::pool::CommandPool;
use command_buffer::pool::StandardCommandPool;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::standard::StandardCommandPool;
use command_buffer::pool::standard::StandardCommandPoolAlloc;
use command_buffer::pool::standard::StandardCommandPoolBuilder;
use command_buffer::synced::SyncCommandBuffer;
use command_buffer::synced::SyncCommandBufferBuilder;
use command_buffer::synced::SyncCommandBufferBuilderError;
use command_buffer::synced::SyncCommandBufferBuilderBindVertexBuffer;
use command_buffer::sys::Flags;
use command_buffer::sys::Kind;
use command_buffer::sys::UnsafeCommandBuffer;
use command_buffer::sys::UnsafeCommandBufferBuilderBufferImageCopy;
use command_buffer::sys::UnsafeCommandBufferBuilderImageAspect;
use command_buffer::validity;
use descriptor::descriptor_set::DescriptorSetsCollection;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use framebuffer::FramebufferAbstract;
use framebuffer::RenderPassDescClearValues;
use framebuffer::RenderPassAbstract;
use image::Image;
use image::ImageLayout;
use image::ImageAccess;
use instance::QueueFamily;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use pipeline::input_assembly::Index;
use pipeline::vertex::VertexSource;
use sync::AccessCheckError;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::GpuFuture;
use OomError;
type Cb<P> = cb::DeviceCheckLayer<cb::QueueTyCheckLayer<cb::ContextCheckLayer<cb::StateCacheLayer<cb::SubmitSyncBuilderLayer<cb::AutoPipelineBarriersLayer<cb::AbstractStorageLayer<cb::UnsafeCommandBufferBuilder<P>>>>>>>>;
/// A command buffer builder that automatically performs checks and synchronization for the
/// commands that are added to it.
/// Note that command buffers allocated from the default command pool (`Arc<StandardCommandPool>`)
@ -42,56 +65,356 @@ type Cb<P> = cb::DeviceCheckLayer<cb::QueueTyCheckLayer<cb::ContextCheckLayer<cb
/// `AutoCommandBufferBuilder` will not implement `Send` and `Sync` either. Once a command buffer
/// is built, however, it *does* implement `Send` and `Sync`.
///
pub struct AutoCommandBufferBuilder<P = Arc<StandardCommandPool>> where P: CommandPool {
inner: Cb<P>
pub struct AutoCommandBufferBuilder<P = StandardCommandPoolBuilder> {
inner: SyncCommandBufferBuilder<P>,
state_cacher: StateCacher,
}
impl AutoCommandBufferBuilder<Arc<StandardCommandPool>> {
impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
pub fn new(device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<Arc<StandardCommandPool>>, OomError>
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
let pool = Device::standard_command_pool(&device, queue_family);
unsafe {
let pool = Device::standard_command_pool(&device, queue_family);
let inner = SyncCommandBufferBuilder::new(&pool, Kind::primary(), Flags::None);
let state_cacher = StateCacher::new();
let cmd = unsafe {
let c = try!(cb::UnsafeCommandBufferBuilder::new(&pool, cb::Kind::primary(), cb::Flags::SimultaneousUse /* TODO: */));
let c = cb::AbstractStorageLayer::new(c);
let c = cb::AutoPipelineBarriersLayer::new(c);
let c = cb::SubmitSyncBuilderLayer::new(c, cb::SubmitSyncBuilderLayerBehavior::UseLayoutHint);
let c = cb::StateCacheLayer::new(c);
let c = cb::ContextCheckLayer::new(c, false, true);
let c = cb::QueueTyCheckLayer::new(c);
let c = cb::DeviceCheckLayer::new(c);
c
};
Ok(AutoCommandBufferBuilder {
inner: inner?,
state_cacher: state_cacher,
})
}
}
}
Ok(AutoCommandBufferBuilder {
inner: cmd,
impl<P> AutoCommandBufferBuilder<P> {
/// Builds the command buffer.
#[inline]
pub fn build(self) -> Result<AutoCommandBuffer<P::Alloc>, OomError>
where P: CommandPoolBuilderAlloc
{
// TODO: error if we're inside a render pass
Ok(AutoCommandBuffer {
inner: self.inner.build()?
})
}
}
unsafe impl<P, O, E> CommandBufferBuild for AutoCommandBufferBuilder<P>
where Cb<P>: CommandBufferBuild<Out = O, Err = E>,
P: CommandPool
{
type Out = O;
type Err = E;
/// Adds a command that enters a render pass.
///
/// If `secondary` is true, then you will only be able to add secondary command buffers while
/// you're inside the first subpass of the render pass. If `secondary` is false, you will only
/// be able to add inline draw commands and not secondary command buffers.
///
/// You must call this before you can add draw commands.
#[inline]
pub fn begin_render_pass<F, C>(mut self, framebuffer: F, secondary: bool,
clear_values: C)
-> Result<Self, AutoCommandBufferBuilderContextError>
where F: FramebufferAbstract + RenderPassDescClearValues<C> + Send + Sync + 'static
{
unsafe {
let clear_values = framebuffer.convert_clear_values(clear_values);
self.inner.begin_render_pass(framebuffer, secondary, clear_values);
Ok(self)
}
}
/// Adds a command that copies from a buffer to another.
///
/// This command will copy from the source to the destination. If their size is not equal, then
/// the amount of data copied is equal to the smallest of the two.
#[inline]
pub fn copy_buffer<S, D>(mut self, src: S, dest: D) -> Result<Self, validity::CheckCopyBufferError>
where S: Buffer,
S::Access: Send + Sync + 'static,
D: Buffer,
D::Access: Send + Sync + 'static,
{
unsafe {
let src = src.access();
let dest = dest.access();
// TODO: check that we're not in a render pass
validity::check_copy_buffer(self.device(), &src, &dest)?;
let size = src.size();
self.inner.copy_buffer(src, dest, iter::once((0, 0, size)));
Ok(self)
}
}
/// Adds a command that copies from a buffer to an image.
pub fn copy_buffer_to_image<S, D>(mut self, src: S, dest: D)
-> Result<Self, AutoCommandBufferBuilderContextError>
where S: Buffer,
S::Access: Send + Sync + 'static,
D: Image,
D::Access: Send + Sync + 'static,
{
let dims = dest.dimensions().width_height_depth();
self.copy_buffer_to_image_dimensions(src, dest, [0, 0, 0], dims, 0, 1, 0)
}
/// Adds a command that copies from a buffer to an image.
pub fn copy_buffer_to_image_dimensions<S, D>(mut self, src: S, dest: D, offset: [u32; 3],
size: [u32; 3], first_layer: u32, num_layers: u32,
mipmap: u32)
-> Result<Self, AutoCommandBufferBuilderContextError>
where S: Buffer,
S::Access: Send + Sync + 'static,
D: Image,
D::Access: Send + Sync + 'static,
{
unsafe {
let src = src.access();
let dest = dest.access();
// TODO: check that we're not in a render pass
// TODO: check validity
// TODO: hastily implemented
let copy = UnsafeCommandBufferBuilderBufferImageCopy {
buffer_offset: 0,
buffer_row_length: 0,
buffer_image_height: 0,
image_aspect: if dest.has_color() {
UnsafeCommandBufferBuilderImageAspect { color: true, depth: false, stencil: false }
} else {
unimplemented!()
},
image_mip_level: mipmap,
image_base_array_layer: first_layer,
image_layer_count: num_layers,
image_offset: [offset[0] as i32, offset[1] as i32, offset[2] as i32],
image_extent: size,
};
let size = src.size();
self.inner.copy_buffer_to_image(src, dest, ImageLayout::TransferDstOptimal, // TODO: let choose layout
iter::once(copy));
Ok(self)
}
}
#[inline]
fn build(self) -> Result<O, E> {
// TODO: wrap around?
CommandBufferBuild::build(self.inner)
pub fn draw<V, Gp, S, Pc>(mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, sets: S,
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
{
unsafe {
// TODO: missing checks
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
self.inner.bind_pipeline_graphics(pipeline.clone());
}
push_constants(&mut self.inner, pipeline.clone(), constants);
set_state(&mut self.inner, dynamic);
descriptor_sets(&mut self.inner, true, pipeline.clone(), sets);
let (vertex_count, instance_count) = vertex_buffers(&mut self.inner, &pipeline,
vertices);
self.inner.draw(vertex_count as u32, instance_count as u32, 0, 0);
Ok(self)
}
}
#[inline]
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(mut self, pipeline: Gp, dynamic: DynamicState,
vertices: V, index_buffer: Ib, sets: S,
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
Ib: Buffer,
Ib::Access: TypedBufferAccess<Content = [I]> + Send + Sync + 'static,
I: Index + 'static,
{
unsafe {
// TODO: missing checks
let index_buffer = index_buffer.access();
let index_count = index_buffer.len();
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
self.inner.bind_pipeline_graphics(pipeline.clone());
}
self.inner.bind_index_buffer(index_buffer, I::ty());
push_constants(&mut self.inner, pipeline.clone(), constants);
set_state(&mut self.inner, dynamic);
descriptor_sets(&mut self.inner, true, pipeline.clone(), sets);
vertex_buffers(&mut self.inner, &pipeline, vertices);
// TODO: how to handle an index out of range of the vertex buffers?
self.inner.draw_indexed(index_count as u32, 1, 0, 0, 0);
Ok(self)
}
}
// TODO: draw_indirect
// TODO: dispatch
/// Adds a command that ends the current render pass.
///
/// This must be called after you went through all the subpasses and before you can build
/// the command buffer or add further commands.
#[inline]
pub fn end_render_pass(mut self) -> Result<Self, AutoCommandBufferBuilderContextError> {
unsafe {
// TODO: check
self.inner.end_render_pass();
Ok(self)
}
}
/// Adds a command that writes the content of a buffer.
///
/// This function is similar to the `memset` function in C. The `data` parameter is a number
/// that will be repeatedly written through the entire buffer.
///
/// > **Note**: This function is technically safe because buffers can only contain integers or
/// > floating point numbers, which are always valid whatever their memory representation is.
/// > But unless your buffer actually contains only 32-bits integers, you are encouraged to use
/// > this function only for zeroing the content of a buffer by passing `0` for the data.
// TODO: not safe because of signalling NaNs
#[inline]
pub fn fill_buffer<B>(mut self, buffer: B, data: u32) -> Result<Self, validity::CheckFillBufferError>
where B: Buffer,
B::Access: Send + Sync + 'static,
{
unsafe {
// TODO: check that we're not in a render pass
let buffer = buffer.access();
validity::check_fill_buffer(self.device(), &buffer)?;
self.inner.fill_buffer(buffer, data);
Ok(self)
}
}
/// Adds a command that jumps to the next subpass of the current render pass.
#[inline]
pub fn next_subpass(mut self, secondary: bool)
-> Result<Self, AutoCommandBufferBuilderContextError>
{
unsafe {
// TODO: check
self.inner.next_subpass(secondary);
Ok(self)
}
}
/// Adds a command that writes data to a buffer.
///
/// If `data` is larger than the buffer, only the part of `data` that fits is written. If the
/// buffer is larger than `data`, only the start of the buffer is written.
#[inline]
pub fn update_buffer<B, D>(mut self, buffer: B, data: &D)
-> Result<Self, validity::CheckUpdateBufferError>
where B: Buffer,
B::Access: Send + Sync + 'static,
D: ?Sized
{
unsafe {
// TODO: check that we're not in a render pass
let buffer = buffer.access();
validity::check_update_buffer(self.device(), &buffer, data)?;
let size_of_data = mem::size_of_val(data);
if buffer.size() > size_of_data {
self.inner.update_buffer(buffer, data);
} else {
unimplemented!() // TODO:
//self.inner.update_buffer(buffer.slice(0 .. size_of_data), data);
}
Ok(self)
}
}
}
unsafe impl<P> CommandBuffer for AutoCommandBufferBuilder<P>
where Cb<P>: CommandBuffer,
P: CommandPool
unsafe impl<P> DeviceOwned for AutoCommandBufferBuilder<P> {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
// Shortcut function to set the push constants.
//
// Iterates over every push-constant range declared by the pipeline layout and issues one
// `push_constants` call per range, passing the matching byte slice of `push_constants`.
// Unsafe: reinterprets `push_constants` as raw bytes; the caller must guarantee that `Pc`
// matches the layout declared by `Pl` and covers every declared range.
unsafe fn push_constants<P, Pl, Pc>(dest: &mut SyncCommandBufferBuilder<P>, pipeline: Pl,
push_constants: Pc)
where Pl: PipelineLayoutAbstract + Send + Sync + Clone + 'static
{
// NOTE(review): this `type Pool` alias looks like a leftover from the removed layer-based
// implementation (it references `Cb<P>`, which no longer exists in the new code) — confirm
// and remove.
type Pool = <Cb<P> as CommandBuffer>::Pool;
for num_range in 0 .. pipeline.num_push_constants_ranges() {
let range = match pipeline.push_constants_range(num_range) {
Some(r) => r,
// Gaps in the range numbering are allowed; just skip them.
None => continue
};
// Vulkan requires push-constant offsets and sizes to be multiples of 4.
debug_assert_eq!(range.offset % 4, 0);
debug_assert_eq!(range.size % 4, 0);
// Pass only the bytes of `push_constants` that belong to this range.
dest.push_constants(pipeline.clone(), range.stages,
range.offset as u32, range.size as u32,
&*((&push_constants as *const Pc as *const u8)
.offset(range.offset as isize)));
}
}
// Shortcut function to change the state of the pipeline.
//
// Records one state-setting command for each dynamic-state field that is `Some`; fields left
// `None` are not touched. Viewports and scissors are always bound starting at slot 0.
unsafe fn set_state<P>(dest: &mut SyncCommandBufferBuilder<P>, dynamic: DynamicState) {
if let Some(line_width) = dynamic.line_width {
dest.set_line_width(line_width);
}
if let Some(ref viewports) = dynamic.viewports {
dest.set_viewport(0, viewports.iter().cloned());
}
if let Some(ref scissors) = dynamic.scissors {
dest.set_scissor(0, scissors.iter().cloned());
}
}
// Shortcut function to bind vertex buffers.
//
// Asks the pipeline to decode `vertices` into concrete vertex buffers, binds them all starting
// at binding 0, and returns the `(vertex_count, instance_count)` pair reported by the pipeline
// so the caller can issue the draw call.
unsafe fn vertex_buffers<P, Gp, V>(dest: &mut SyncCommandBufferBuilder<P>, pipeline: &Gp,
vertices: V) -> (u32, u32)
where Gp: VertexSource<V>,
{
let (vertex_buffers, vertex_count, instance_count) = pipeline.decode(vertices);
let mut binder = dest.bind_vertex_buffers();
for vb in vertex_buffers {
binder.add(vb);
}
// Bind all collected buffers contiguously from binding index 0.
binder.submit(0);
(vertex_count as u32, instance_count as u32)
}
// Shortcut function to bind descriptor sets.
//
// Flattens the sets collection and binds everything starting at set index 0. `gfx` selects
// between the graphics and compute bind points; no dynamic offsets are passed (`iter::empty()`).
unsafe fn descriptor_sets<P, Pl, S>(dest: &mut SyncCommandBufferBuilder<P>, gfx: bool,
pipeline: Pl, sets: S)
where Pl: PipelineLayoutAbstract + Send + Sync + Clone + 'static,
S: DescriptorSetsCollection
{
let mut sets_binder = dest.bind_descriptor_sets();
for set in sets.into_vec() {
sets_binder.add(set);
}
sets_binder.submit(gfx, pipeline.clone(), 0, iter::empty());
}
/// Command buffer built by an `AutoCommandBufferBuilder`.
///
/// Thin wrapper around a `SyncCommandBuffer`, which performs the synchronization.
pub struct AutoCommandBuffer<P = StandardCommandPoolAlloc> {
inner: SyncCommandBuffer<P>,
}
unsafe impl<P> CommandBuffer for AutoCommandBuffer<P> {
type PoolAlloc = P;
#[inline]
fn inner(&self) -> &UnsafeCommandBuffer<Self::Pool> {
fn inner(&self) -> &UnsafeCommandBuffer<P> {
self.inner.inner()
}
@ -115,60 +438,42 @@ unsafe impl<P> CommandBuffer for AutoCommandBufferBuilder<P>
}
}
unsafe impl<P> DeviceOwned for AutoCommandBufferBuilder<P>
where Cb<P>: DeviceOwned,
P: CommandPool
{
// Delegates device ownership to the inner synchronized command buffer.
unsafe impl<P> DeviceOwned for AutoCommandBuffer<P> {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
unsafe impl<P> CommandBufferBuilder for AutoCommandBufferBuilder<P>
where Cb<P>: CommandBufferBuilder,
P: CommandPool
{
#[inline]
fn queue_family(&self) -> QueueFamily {
self.inner.queue_family()
}
// Generates an error enum that wraps a `SyncCommandBufferBuilderError`.
// NOTE(review): the generated enum currently has a single variant and no trait impls —
// presumably work in progress; confirm before relying on it.
macro_rules! err_gen {
($name:ident) => (
pub enum $name {
SyncCommandBufferBuilderError(SyncCommandBufferBuilderError),
}
);
}
macro_rules! pass_through {
(($($param:ident),*), $cmd:ty) => {
unsafe impl<P $(, $param)*> AddCommand<$cmd> for AutoCommandBufferBuilder<P>
where P: CommandPool,
Cb<P>: AddCommand<$cmd, Out = Cb<P>>
{
type Out = AutoCommandBufferBuilder<P>;
err_gen!(Foo);
#[inline]
fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
Ok(AutoCommandBufferBuilder {
inner: self.inner.add(command)?,
})
}
/// Error returned when a command is added in a context (inside/outside of a render pass) where
/// it is not allowed.
#[derive(Debug, Copy, Clone)]
pub enum AutoCommandBufferBuilderContextError {
// The requested operation is forbidden in the current render-pass context.
Forbidden,
}
impl error::Error for AutoCommandBufferBuilderContextError {
/// Returns a static human-readable description of the error.
#[inline]
fn description(&self) -> &str {
match *self {
AutoCommandBufferBuilderContextError::Forbidden => {
"operation forbidden inside or outside of a render pass"
},
}
}
}
pass_through!((Rp, F), commands_raw::CmdBeginRenderPass<Rp, F>);
pass_through!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
pass_through!((B), commands_raw::CmdBindIndexBuffer<B>);
pass_through!((Pl), commands_raw::CmdBindPipeline<Pl>);
pass_through!((V), commands_raw::CmdBindVertexBuffers<V>);
pass_through!((), commands_raw::CmdClearAttachments);
pass_through!((S, D), commands_raw::CmdCopyBuffer<S, D>);
pass_through!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
pass_through!((), commands_raw::CmdDispatchRaw);
pass_through!((), commands_raw::CmdDrawRaw);
pass_through!((), commands_raw::CmdDrawIndexedRaw);
pass_through!((B), commands_raw::CmdDrawIndirectRaw<B>);
pass_through!((), commands_raw::CmdEndRenderPass);
pass_through!((C), commands_raw::CmdExecuteCommands<C>);
pass_through!((B), commands_raw::CmdFillBuffer<B>);
pass_through!((), commands_raw::CmdNextSubpass);
pass_through!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
pass_through!((), commands_raw::CmdSetState);
pass_through!((B, D), commands_raw::CmdUpdateBuffer<B, D>);
impl fmt::Display for AutoCommandBufferBuilderContextError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}

View File

@ -1,153 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::any::Any;
use std::sync::Arc;
use buffer::BufferAccess;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::cb::UnsafeCommandBuffer;
use command_buffer::commands_raw;
use command_buffer::CommandAddError;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferBuilder;
use command_buffer::CommandBufferExecError;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageLayout;
use image::ImageAccess;
use instance::QueueFamily;
use sync::AccessCheckError;
use sync::AccessFlagBits;
use sync::GpuFuture;
use sync::PipelineStages;
/// Layer that stores commands in an abstract way.
pub struct AbstractStorageLayer<I> {
// The wrapped command buffer (builder) that actually records the commands.
inner: I,
// Type-erased copies of every command added through this layer, kept alive alongside the
// command buffer.
commands: Vec<Box<Any + Send + Sync>>,
}
impl<I> AbstractStorageLayer<I> {
/// Builds a new `AbstractStorageLayer` wrapping `inner`, with an empty command list.
#[inline]
pub fn new(inner: I) -> AbstractStorageLayer<I> {
AbstractStorageLayer {
inner: inner,
commands: Vec::new(),
}
}
}
// `CommandBuffer` is implemented purely by delegation to the inner command buffer; this layer
// adds no behavior of its own at submission time.
unsafe impl<I> CommandBuffer for AbstractStorageLayer<I> where I: CommandBuffer {
type Pool = I::Pool;
#[inline]
fn inner(&self) -> &UnsafeCommandBuffer<I::Pool> {
self.inner.inner()
}
#[inline]
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
self.inner.prepare_submit(future, queue)
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
self.inner.check_buffer_access(buffer, exclusive, queue)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
self.inner.check_image_access(image, layout, exclusive, queue)
}
}
// Device ownership is that of the wrapped builder.
unsafe impl<I> DeviceOwned for AbstractStorageLayer<I> where I: DeviceOwned {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
// Building the inner object re-wraps the result so the stored command list stays attached to
// the finished command buffer.
unsafe impl<I, O, E> CommandBufferBuild for AbstractStorageLayer<I>
where I: CommandBufferBuild<Out = O, Err = E>
{
type Out = AbstractStorageLayer<O>;
type Err = E;
#[inline]
fn build(self) -> Result<Self::Out, E> {
let inner = try!(self.inner.build());
Ok(AbstractStorageLayer {
inner: inner,
commands: self.commands,
})
}
}
// Queue-family information is that of the wrapped builder.
unsafe impl<I> CommandBufferBuilder for AbstractStorageLayer<I> where I: CommandBufferBuilder {
#[inline]
fn queue_family(&self) -> QueueFamily {
self.inner.queue_family()
}
}
// Implements `AddCommand<$cmd>` for the layer: the command is first forwarded *by reference*
// to the inner builder, then boxed and appended to `commands` so that it outlives the
// recording (the inner builder only borrowed it).
macro_rules! pass_through {
(($($param:ident),*), $cmd:ty) => {
unsafe impl<I $(, $param)*> AddCommand<$cmd> for AbstractStorageLayer<I>
where I: for<'r> AddCommand<&'r $cmd, Out = I>, $cmd: Send + Sync + 'static
{
type Out = AbstractStorageLayer<I>;
#[inline]
fn add(mut self, command: $cmd) -> Result<Self::Out, CommandAddError> {
let new_inner = AddCommand::add(self.inner, &command)?;
// TODO: should store a lightweight version of the command
self.commands.push(Box::new(command) as Box<_>);
Ok(AbstractStorageLayer {
inner: new_inner,
commands: self.commands,
})
}
}
}
}
// One `AddCommand` implementation per raw command type supported by this layer.
pass_through!((Rp, F), commands_raw::CmdBeginRenderPass<Rp, F>);
pass_through!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
pass_through!((B), commands_raw::CmdBindIndexBuffer<B>);
pass_through!((Pl), commands_raw::CmdBindPipeline<Pl>);
pass_through!((V), commands_raw::CmdBindVertexBuffers<V>);
pass_through!((S, D), commands_raw::CmdBlitImage<S, D>);
pass_through!((), commands_raw::CmdClearAttachments);
pass_through!((S, D), commands_raw::CmdCopyBuffer<S, D>);
pass_through!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
pass_through!((S, D), commands_raw::CmdCopyImage<S, D>);
pass_through!((), commands_raw::CmdDispatchRaw);
pass_through!((), commands_raw::CmdDrawIndexedRaw);
pass_through!((B), commands_raw::CmdDrawIndirectRaw<B>);
pass_through!((), commands_raw::CmdDrawRaw);
pass_through!((), commands_raw::CmdEndRenderPass);
pass_through!((C), commands_raw::CmdExecuteCommands<C>);
pass_through!((B), commands_raw::CmdFillBuffer<B>);
pass_through!((), commands_raw::CmdNextSubpass);
pass_through!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
pass_through!((S, D), commands_raw::CmdResolveImage<S, D>);
pass_through!((), commands_raw::CmdSetEvent);
pass_through!((), commands_raw::CmdSetState);
pass_through!((B, D), commands_raw::CmdUpdateBuffer<B, D>);

View File

@ -1,116 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::CommandAddError;
use command_buffer::CommandBufferBuilder;
use command_buffer::commands_raw;
use device::Device;
use device::DeviceOwned;
use instance::QueueFamily;
/// Layer intended to automatically insert pipeline barriers between commands.
// NOTE(review): as visible here, the layer only forwards commands and adds no barriers yet —
// the `AddCommand` impl that would do so is commented out below.
pub struct AutoPipelineBarriersLayer<I> {
// The wrapped command buffer builder commands are forwarded to.
inner: I,
}
impl<I> AutoPipelineBarriersLayer<I> {
/// Builds a new `AutoPipelineBarriersLayer` wrapping `inner`.
#[inline]
pub fn new(inner: I) -> AutoPipelineBarriersLayer<I> {
AutoPipelineBarriersLayer {
inner: inner,
}
}
}
/*unsafe impl<C, I, L> AddCommand<C> for AutoPipelineBarriersLayer<I, L>
where I: for<'r> AddCommand<&'r C, Out = I>
{
type Out = AutoPipelineBarriersLayer<I, (L, C)>;
#[inline]
fn add(self, command: C) -> Self::Out {
AutoPipelineBarriersLayer {
inner: AddCommand::add(self.inner, command),
}
}
}*/
// Building simply unwraps to the inner builder's output; this layer leaves no trace in the
// finished command buffer.
unsafe impl<I, O, E> CommandBufferBuild for AutoPipelineBarriersLayer<I>
where I: CommandBufferBuild<Out = O, Err = E>
{
type Out = O;
type Err = E;
#[inline]
fn build(self) -> Result<O, E> {
self.inner.build()
}
}
// Device ownership is that of the wrapped builder.
unsafe impl<I> DeviceOwned for AutoPipelineBarriersLayer<I>
where I: DeviceOwned
{
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
// Queue-family information is that of the wrapped builder.
unsafe impl<I> CommandBufferBuilder for AutoPipelineBarriersLayer<I>
where I: CommandBufferBuilder
{
#[inline]
fn queue_family(&self) -> QueueFamily {
self.inner.queue_family()
}
}
// Implements `AddCommand<$cmd>` by forwarding the command unchanged to the inner builder and
// re-wrapping the result. No pipeline barriers are inserted here (yet).
macro_rules! pass_through {
(($($param:ident),*), $cmd:ty) => {
unsafe impl<I, O $(, $param)*> AddCommand<$cmd> for AutoPipelineBarriersLayer<I>
where I: for<'r> AddCommand<$cmd, Out = O>
{
type Out = AutoPipelineBarriersLayer<O>;
#[inline]
fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
Ok(AutoPipelineBarriersLayer {
inner: AddCommand::add(self.inner, command)?,
})
}
}
}
}
// One forwarding `AddCommand` implementation per raw command type.
pass_through!((Rp, F), commands_raw::CmdBeginRenderPass<Rp, F>);
pass_through!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
pass_through!((B), commands_raw::CmdBindIndexBuffer<B>);
pass_through!((Pl), commands_raw::CmdBindPipeline<Pl>);
pass_through!((V), commands_raw::CmdBindVertexBuffers<V>);
pass_through!((S, D), commands_raw::CmdBlitImage<S, D>);
pass_through!((), commands_raw::CmdClearAttachments);
pass_through!((S, D), commands_raw::CmdCopyBuffer<S, D>);
pass_through!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
pass_through!((S, D), commands_raw::CmdCopyImage<S, D>);
pass_through!((), commands_raw::CmdDispatchRaw);
pass_through!((), commands_raw::CmdDrawRaw);
pass_through!((), commands_raw::CmdDrawIndexedRaw);
pass_through!((B), commands_raw::CmdDrawIndirectRaw<B>);
pass_through!((), commands_raw::CmdEndRenderPass);
pass_through!((C), commands_raw::CmdExecuteCommands<C>);
pass_through!((B), commands_raw::CmdFillBuffer<B>);
pass_through!((), commands_raw::CmdNextSubpass);
pass_through!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
pass_through!((S, D), commands_raw::CmdResolveImage<S, D>);
pass_through!((), commands_raw::CmdSetEvent);
pass_through!((), commands_raw::CmdSetState);
pass_through!((B, D), commands_raw::CmdUpdateBuffer<B, D>);

View File

@ -1,269 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::CommandAddError;
use command_buffer::CommandBufferBuilder;
use command_buffer::commands_raw;
use device::Device;
use device::DeviceOwned;
use instance::QueueFamily;
/// Layer around a command buffer builder that checks whether the commands can be executed in the
/// given context related to render passes.
///
/// What is checked exactly:
///
/// - When adding a command that can only be executed within a render pass or outside of a render
/// pass, checks that we are within or outside of a render pass.
/// - When leaving the render pass or going to the next subpass, makes sure that the number of
/// subpasses of the current render pass is respected.
/// - When binding a graphics pipeline or drawing, makes sure that the pipeline is valid for the
/// current render pass.
///
pub struct ContextCheckLayer<I> {
// Inner command buffer builder.
inner: I,
// True if we are currently inside a render pass.
inside_render_pass: bool,
// True if entering/leaving a render pass or going to the next subpass is allowed.
allow_render_pass_ops: bool,
}
impl<I> ContextCheckLayer<I> {
/// Builds a new `ContextCheckLayer`.
///
/// If `allow_render_pass_ops` is true, then entering/leaving a render pass or going to the
/// next subpass is allowed by the layer.
///
/// If `inside_render_pass` is true, then the builder is currently inside a render pass.
///
/// Note that this layer will only protect you if you pass correct values in this constructor.
/// It is not unsafe to pass wrong values, but if you do so then the layer will be inefficient
/// as a safety tool.
#[inline]
pub fn new(inner: I, inside_render_pass: bool, allow_render_pass_ops: bool)
-> ContextCheckLayer<I>
{
ContextCheckLayer {
inner: inner,
inside_render_pass: inside_render_pass,
allow_render_pass_ops: allow_render_pass_ops,
}
}
/// Destroys the layer and returns the underlying command buffer.
#[inline]
pub fn into_inner(self) -> I {
self.inner
}
}
unsafe impl<I, O, E> CommandBufferBuild for ContextCheckLayer<I>
where I: CommandBufferBuild<Out = O, Err = E>
{
type Out = O;
type Err = E;
#[inline]
fn build(self) -> Result<O, E> {
self.inner.build()
}
}
unsafe impl<I> DeviceOwned for ContextCheckLayer<I>
where I: DeviceOwned
{
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
unsafe impl<I> CommandBufferBuilder for ContextCheckLayer<I>
where I: CommandBufferBuilder
{
#[inline]
fn queue_family(&self) -> QueueFamily {
self.inner.queue_family()
}
}
// TODO:
// impl!((C), commands_raw::CmdExecuteCommands<C>);
// FIXME: must also check that a pipeline's render pass matches the render pass
// FIXME:
// > If the variable multisample rate feature is not supported, pipeline is a graphics pipeline,
// > the current subpass has no attachments, and this is not the first call to this function with
// > a graphics pipeline after transitioning to the current subpass, then the sample count
// > specified by this pipeline must match that set in the previous pipeline
// Commands that are valid both inside and outside of a render pass: forwarded unchanged,
// with the layer state carried over as-is.
macro_rules! impl_always {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for ContextCheckLayer<I>
            where I: AddCommand<$cmd, Out = O>
        {
            type Out = ContextCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                Ok(ContextCheckLayer {
                    inner: self.inner.add(command)?,
                    inside_render_pass: self.inside_render_pass,
                    allow_render_pass_ops: self.allow_render_pass_ops,
                })
            }
        }
    }
}

impl_always!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
impl_always!((B), commands_raw::CmdBindIndexBuffer<B>);
impl_always!((Pl), commands_raw::CmdBindPipeline<Pl>);
impl_always!((V), commands_raw::CmdBindVertexBuffers<V>);
impl_always!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
impl_always!((), commands_raw::CmdSetState);

// Commands that may only appear inside a render pass; adding one while outside is rejected
// with `ForbiddenOutsideRenderPass`.
macro_rules! impl_inside_only {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for ContextCheckLayer<I>
            where I: AddCommand<$cmd, Out = O>
        {
            type Out = ContextCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                if !self.inside_render_pass {
                    return Err(CommandAddError::ForbiddenOutsideRenderPass);
                }

                Ok(ContextCheckLayer {
                    inner: self.inner.add(command)?,
                    inside_render_pass: self.inside_render_pass,
                    allow_render_pass_ops: self.allow_render_pass_ops,
                })
            }
        }
    }
}

impl_inside_only!((), commands_raw::CmdClearAttachments);
impl_inside_only!((), commands_raw::CmdDrawIndexedRaw);
impl_inside_only!((B), commands_raw::CmdDrawIndirectRaw<B>);
impl_inside_only!((), commands_raw::CmdDrawRaw);

// Commands that may only appear outside of a render pass; adding one while inside is rejected
// with `ForbiddenInsideRenderPass`.
macro_rules! impl_outside_only {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for ContextCheckLayer<I>
            where I: AddCommand<$cmd, Out = O>
        {
            type Out = ContextCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                if self.inside_render_pass {
                    return Err(CommandAddError::ForbiddenInsideRenderPass);
                }

                Ok(ContextCheckLayer {
                    inner: self.inner.add(command)?,
                    inside_render_pass: self.inside_render_pass,
                    allow_render_pass_ops: self.allow_render_pass_ops,
                })
            }
        }
    }
}

impl_outside_only!((S, D), commands_raw::CmdBlitImage<S, D>);
impl_outside_only!((S, D), commands_raw::CmdCopyBuffer<S, D>);
impl_outside_only!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
impl_outside_only!((S, D), commands_raw::CmdCopyImage<S, D>);
impl_outside_only!((), commands_raw::CmdDispatchRaw);
impl_outside_only!((B), commands_raw::CmdFillBuffer<B>);
impl_outside_only!((S, D), commands_raw::CmdResolveImage<S, D>);
impl_outside_only!((), commands_raw::CmdSetEvent);
impl_outside_only!((B, D), commands_raw::CmdUpdateBuffer<B, D>);
unsafe impl<'a, I, O, Rp, F> AddCommand<commands_raw::CmdBeginRenderPass<Rp, F>> for ContextCheckLayer<I>
    where I: AddCommand<commands_raw::CmdBeginRenderPass<Rp, F>, Out = O>
{
    type Out = ContextCheckLayer<O>;

    #[inline]
    fn add(self, cmd: commands_raw::CmdBeginRenderPass<Rp, F>) -> Result<Self::Out, CommandAddError> {
        // Beginning a render pass is forbidden while one is already active, and is only
        // permitted on builders where render pass operations are allowed.
        if self.inside_render_pass {
            Err(CommandAddError::ForbiddenInsideRenderPass)
        } else if !self.allow_render_pass_ops {
            Err(CommandAddError::ForbiddenInSecondaryCommandBuffer)
        } else {
            let forwarded = self.inner.add(cmd)?;
            Ok(ContextCheckLayer {
                inner: forwarded,
                // We are now inside a render pass.
                inside_render_pass: true,
                // `allow_render_pass_ops` was just checked to be true.
                allow_render_pass_ops: true,
            })
        }
    }
}
unsafe impl<'a, I, O> AddCommand<commands_raw::CmdNextSubpass> for ContextCheckLayer<I>
    where I: AddCommand<commands_raw::CmdNextSubpass, Out = O>
{
    type Out = ContextCheckLayer<O>;

    #[inline]
    fn add(self, cmd: commands_raw::CmdNextSubpass) -> Result<Self::Out, CommandAddError> {
        // Advancing the subpass only makes sense while inside a render pass, and only on
        // builders where render pass operations are allowed. The render-pass check takes
        // priority over the ops check.
        match (self.inside_render_pass, self.allow_render_pass_ops) {
            (false, _) => return Err(CommandAddError::ForbiddenOutsideRenderPass),
            (_, false) => return Err(CommandAddError::ForbiddenInSecondaryCommandBuffer),
            (true, true) => (),
        }

        // FIXME: check number of subpasses
        Ok(ContextCheckLayer {
            inner: self.inner.add(cmd)?,
            inside_render_pass: true,
            allow_render_pass_ops: true,
        })
    }
}
unsafe impl<'a, I, O> AddCommand<commands_raw::CmdEndRenderPass> for ContextCheckLayer<I>
    where I: AddCommand<commands_raw::CmdEndRenderPass, Out = O>
{
    type Out = ContextCheckLayer<O>;

    #[inline]
    fn add(self, cmd: commands_raw::CmdEndRenderPass) -> Result<Self::Out, CommandAddError> {
        // Leaving a render pass requires being inside one, and is only permitted on builders
        // where render pass operations are allowed.
        if !self.inside_render_pass {
            Err(CommandAddError::ForbiddenOutsideRenderPass)
        } else if !self.allow_render_pass_ops {
            Err(CommandAddError::ForbiddenInSecondaryCommandBuffer)
        } else {
            // FIXME: check number of subpasses
            let forwarded = self.inner.add(cmd)?;
            Ok(ContextCheckLayer {
                inner: forwarded,
                // We just left the render pass.
                inside_render_pass: false,
                allow_render_pass_ops: true,
            })
        }
    }
}

View File

@ -1,131 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::CommandAddError;
use command_buffer::CommandBufferBuilder;
use command_buffer::commands_raw;
use device::Device;
use device::DeviceOwned;
use instance::QueueFamily;
use VulkanObject;
/// Layer around a command buffer builder that checks whether the commands added to it belong to
/// the same device as the command buffer.
pub struct DeviceCheckLayer<I> {
    // The wrapped builder; every command's device is compared against it before forwarding.
    inner: I,
}

impl<I> DeviceCheckLayer<I> {
    /// Builds a new `DeviceCheckLayer`.
    #[inline]
    pub fn new(inner: I) -> DeviceCheckLayer<I> {
        DeviceCheckLayer {
            inner: inner,
        }
    }

    /// Destroys the layer and returns the underlying command buffer.
    #[inline]
    pub fn into_inner(self) -> I {
        self.inner
    }
}

// Device and queue family queries, as well as building, are forwarded to the inner builder.
unsafe impl<I> DeviceOwned for DeviceCheckLayer<I>
    where I: DeviceOwned
{
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

unsafe impl<I> CommandBufferBuilder for DeviceCheckLayer<I>
    where I: CommandBufferBuilder
{
    #[inline]
    fn queue_family(&self) -> QueueFamily {
        self.inner.queue_family()
    }
}

unsafe impl<I, O, E> CommandBufferBuild for DeviceCheckLayer<I>
    where I: CommandBufferBuild<Out = O, Err = E>
{
    type Out = O;
    type Err = E;

    #[inline]
    fn build(self) -> Result<O, E> {
        self.inner.build()
    }
}
// First arm: commands that are `DeviceOwned` have their raw device handle compared against
// the builder's; a mismatch is a programmer error, hence the panic (`assert_eq!`) rather
// than a `CommandAddError`. Second arm (`no-device`): commands not tied to any particular
// device are forwarded without any check.
macro_rules! pass_through {
    (($($param:ident),*), $cmd:ty) => (
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for DeviceCheckLayer<I>
            where I: AddCommand<$cmd, Out = O> + DeviceOwned, $cmd: DeviceOwned
        {
            type Out = DeviceCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                let inner_device = self.inner.device().internal_object();
                let cmd_device = command.device().internal_object();
                assert_eq!(inner_device, cmd_device);

                Ok(DeviceCheckLayer {
                    inner: self.inner.add(command)?,
                })
            }
        }
    );

    (($($param:ident),*), $cmd:ty, no-device) => (
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for DeviceCheckLayer<I>
            where I: AddCommand<$cmd, Out = O>
        {
            type Out = DeviceCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                Ok(DeviceCheckLayer {
                    inner: self.inner.add(command)?,
                })
            }
        }
    );
}

pass_through!((Rp, F), commands_raw::CmdBeginRenderPass<Rp, F>);
pass_through!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
pass_through!((B), commands_raw::CmdBindIndexBuffer<B>);
pass_through!((Pl), commands_raw::CmdBindPipeline<Pl>);
pass_through!((V), commands_raw::CmdBindVertexBuffers<V>);
pass_through!((S, D), commands_raw::CmdBlitImage<S, D>);
pass_through!((), commands_raw::CmdClearAttachments, no-device);
pass_through!((S, D), commands_raw::CmdCopyBuffer<S, D>);
pass_through!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
pass_through!((S, D), commands_raw::CmdCopyImage<S, D>);
pass_through!((), commands_raw::CmdDispatchRaw);
pass_through!((), commands_raw::CmdDrawIndexedRaw, no-device);
pass_through!((B), commands_raw::CmdDrawIndirectRaw<B>);
pass_through!((), commands_raw::CmdDrawRaw, no-device);
pass_through!((), commands_raw::CmdEndRenderPass, no-device);
pass_through!((C), commands_raw::CmdExecuteCommands<C>);
pass_through!((B), commands_raw::CmdFillBuffer<B>);
pass_through!((), commands_raw::CmdNextSubpass, no-device);
pass_through!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
pass_through!((S, D), commands_raw::CmdResolveImage<S, D>);
pass_through!((), commands_raw::CmdSetEvent);
pass_through!((), commands_raw::CmdSetState);
pass_through!((B, D), commands_raw::CmdUpdateBuffer<B, D>);

View File

@ -1,100 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Internals of vulkano's command buffers building.
//!
//! You probably don't need to look inside this module if you're a beginner. The
//! `AutoCommandBufferBuilder` provided in the parent module should be good for most needs.
//!
//! # Builder basics
//!
//! The lowest-level command buffer types are `UnsafeCommandBufferBuilder` and
//! `UnsafeCommandBuffer`. These two types have zero overhead over Vulkan command buffers but are
//! very unsafe to use.
//!
//! Before you can add a command to an unsafe command buffer builder, you should:
//!
//! - Make sure that the buffers or images used by the command stay alive for the duration of the
//! command buffer.
//! - Check that the device used by the buffers or images of the command is the same as the device
//! of the command buffer.
//! - If the command buffer is inside/outside a render pass, check that the command can be executed
//! inside/outside a render pass. Same for secondary command buffers.
//! - Check that the command can be executed on the queue family of the command buffer. Some queue
//! families don't support graphics and/or compute operations.
//! - Make sure that when the command buffer is submitted the buffers and images of the command
//! will be properly synchronized.
//! - Make sure that pipeline barriers are correctly inserted in order to avoid race conditions.
//!
//! In order to allow you to customize which checks are performed, vulkano provides *layers*. They
//! are structs that can be put around a command buffer builder and that will perform them. Keep
//! in mind that all the conditions above must be respected, but if you somehow make sure at
//! compile-time that some requirements are always correct, you can avoid paying some runtime cost
//! by not using all layers.
//!
//! Adding a command to a command buffer builder is done in two steps:
//!
//! - First you must build a struct that represents the command to add. The struct's constructor
//! can perform various checks to make sure that the command itself is valid, or it can provide
//! an unsafe constructor that doesn't perform any check.
//! - Then use the `AddCommand` trait to add it. The trait is implemented on the command buffer
//! builder and on the various layers, and its template parameter is the struct representing
//! the command.
//!
//! Since the `UnsafeCommandBufferBuilder` doesn't keep the command structs alive (as it would
//! incur an overhead), it implements `AddCommand<&T>`.
//!
//! The role of the `CommandsListLayer` and `BufferedCommandsListLayer` layers is to keep the
//! commands alive. They implement `AddCommand<T>` if the builder they wrap around implements
//! `AddCommand<&T>`. In other words they are the lowest level that you should put around an
//! `UnsafeCommandBufferBuilder`.
//!
//! The other layers of this module implement `AddCommand<T>` if the builder they wrap around
//! implements `AddCommand<T>`.
//!
//! # Building a command buffer
//!
//! Once you are satisfied with the commands you added to a builder, use the `CommandBufferBuild`
//! trait to build it.
//!
//! This trait is implemented on the `UnsafeCommandBufferBuilder` but also on all the layers.
//! The builder's layers can choose to add layers around the finished command buffer.
//!
//! # The `CommandsList` trait
//!
//! The `CommandsList` trait is implemented on any command buffer or command buffer builder that
//! exposes a list of commands. It is required by some of the layers.
pub use self::abstract_storage::AbstractStorageLayer;
pub use self::auto_barriers::AutoPipelineBarriersLayer;
pub use self::context_check::ContextCheckLayer;
pub use self::device_check::DeviceCheckLayer;
pub use self::queue_ty_check::QueueTyCheckLayer;
pub use self::state_cache::StateCacheLayer;
pub use self::submit_sync::SubmitSyncBuilderLayer;
pub use self::submit_sync::SubmitSyncBuilderLayerBehavior;
pub use self::submit_sync::SubmitSyncLayer;
pub use self::sys::Kind;
pub use self::sys::Flags;
pub use self::sys::UnsafeCommandBufferBuilder;
pub use self::sys::UnsafeCommandBuffer;
pub use self::traits::AddCommand;
// TODO: remove this line
pub use command_buffer::traits::CommandBufferBuild;
mod abstract_storage;
mod auto_barriers;
mod device_check;
mod context_check;
mod queue_ty_check;
mod state_cache;
mod submit_sync;
mod sys;
mod traits;

View File

@ -1,245 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::CommandAddError;
use command_buffer::CommandBufferBuilder;
use command_buffer::CommandBuffer;
use command_buffer::commands_raw;
use device::Device;
use device::DeviceOwned;
use instance::QueueFamily;
use VulkanObject;
/// Layer around a command buffer builder that checks whether the commands added to it match the
/// type of the queue family of the underlying builder.
///
/// Commands that perform graphical or compute operations can only be executed on queue families
/// that support graphical or compute operations. This is what this layer verifies.
pub struct QueueTyCheckLayer<I> {
    // The wrapped builder; its queue family determines which commands are accepted.
    inner: I,
}

impl<I> QueueTyCheckLayer<I> {
    /// Builds a new `QueueTyCheckLayer`.
    #[inline]
    pub fn new(inner: I) -> QueueTyCheckLayer<I> {
        QueueTyCheckLayer {
            inner: inner,
        }
    }

    /// Destroys the layer and returns the underlying command buffer.
    #[inline]
    pub fn into_inner(self) -> I {
        self.inner
    }
}

// Device/queue-family queries and building are forwarded to the inner builder.
unsafe impl<I> DeviceOwned for QueueTyCheckLayer<I>
    where I: DeviceOwned
{
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

unsafe impl<I> CommandBufferBuilder for QueueTyCheckLayer<I>
    where I: CommandBufferBuilder
{
    #[inline]
    fn queue_family(&self) -> QueueFamily {
        self.inner.queue_family()
    }
}

unsafe impl<I, O, E> CommandBufferBuild for QueueTyCheckLayer<I>
    where I: CommandBufferBuild<Out = O, Err = E>
{
    type Out = O;
    type Err = E;

    #[inline]
    fn build(self) -> Result<O, E> {
        self.inner.build()
    }
}
// Transfer-like commands are accepted on every queue family.
macro_rules! q_ty_impl_always {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for QueueTyCheckLayer<I>
            where I: CommandBufferBuilder + AddCommand<$cmd, Out = O>
        {
            type Out = QueueTyCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                Ok(QueueTyCheckLayer {
                    inner: self.inner.add(command)?,
                })
            }
        }
    }
}

q_ty_impl_always!((S, D), commands_raw::CmdCopyBuffer<S, D>);
q_ty_impl_always!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
q_ty_impl_always!((S, D), commands_raw::CmdCopyImage<S, D>);
q_ty_impl_always!((B), commands_raw::CmdFillBuffer<B>);
q_ty_impl_always!((B, D), commands_raw::CmdUpdateBuffer<B, D>);

// Commands rejected with `GraphicsOperationsNotSupported` unless the queue family supports
// graphics operations.
macro_rules! q_ty_impl_graphics {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for QueueTyCheckLayer<I>
            where I: CommandBufferBuilder + AddCommand<$cmd, Out = O>
        {
            type Out = QueueTyCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                if !self.supports_graphics() {
                    return Err(CommandAddError::GraphicsOperationsNotSupported);
                }

                Ok(QueueTyCheckLayer {
                    inner: self.inner.add(command)?,
                })
            }
        }
    }
}

q_ty_impl_graphics!((Rp, F), commands_raw::CmdBeginRenderPass<Rp, F>);
q_ty_impl_graphics!((B), commands_raw::CmdBindIndexBuffer<B>);
q_ty_impl_graphics!((V), commands_raw::CmdBindVertexBuffers<V>);
q_ty_impl_graphics!((S, D), commands_raw::CmdBlitImage<S, D>);
q_ty_impl_graphics!((), commands_raw::CmdClearAttachments);
q_ty_impl_graphics!((), commands_raw::CmdDrawIndexedRaw);
q_ty_impl_graphics!((B), commands_raw::CmdDrawIndirectRaw<B>);
q_ty_impl_graphics!((), commands_raw::CmdDrawRaw);
q_ty_impl_graphics!((), commands_raw::CmdEndRenderPass);
q_ty_impl_graphics!((), commands_raw::CmdNextSubpass);
q_ty_impl_graphics!((S, D), commands_raw::CmdResolveImage<S, D>);

// Commands rejected with `ComputeOperationsNotSupported` unless the queue family supports
// compute operations.
macro_rules! q_ty_impl_compute {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for QueueTyCheckLayer<I>
            where I: CommandBufferBuilder + AddCommand<$cmd, Out = O>
        {
            type Out = QueueTyCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                if !self.supports_compute() {
                    return Err(CommandAddError::ComputeOperationsNotSupported);
                }

                Ok(QueueTyCheckLayer {
                    inner: self.inner.add(command)?,
                })
            }
        }
    }
}

q_ty_impl_compute!((), commands_raw::CmdDispatchRaw);

// Commands that require either graphics or compute support; note that this currently panics
// instead of returning an error (see the TODO).
macro_rules! q_ty_impl_graphics_or_compute {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for QueueTyCheckLayer<I>
            where I: CommandBufferBuilder + AddCommand<$cmd, Out = O>
        {
            type Out = QueueTyCheckLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                assert!(self.supports_graphics() || self.supports_compute());      // TODO: proper error?

                Ok(QueueTyCheckLayer {
                    inner: self.inner.add(command)?,
                })
            }
        }
    }
}

q_ty_impl_graphics_or_compute!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
q_ty_impl_graphics_or_compute!((), commands_raw::CmdSetEvent);
q_ty_impl_graphics_or_compute!((), commands_raw::CmdSetState);
unsafe impl<I, O, Pl> AddCommand<commands_raw::CmdBindPipeline<Pl>> for QueueTyCheckLayer<I>
    where I: CommandBufferBuilder + AddCommand<commands_raw::CmdBindPipeline<Pl>, Out = O>
{
    type Out = QueueTyCheckLayer<O>;

    #[inline]
    fn add(self, cmd: commands_raw::CmdBindPipeline<Pl>) -> Result<Self::Out, CommandAddError> {
        // A graphics pipeline can only be bound on a graphics-capable queue family, and a
        // compute pipeline only on a compute-capable one.
        if cmd.is_graphics() {
            if !self.supports_graphics() {
                return Err(CommandAddError::GraphicsOperationsNotSupported);
            }
        } else if !self.supports_compute() {
            return Err(CommandAddError::ComputeOperationsNotSupported);
        }

        let forwarded = self.inner.add(cmd)?;
        Ok(QueueTyCheckLayer { inner: forwarded })
    }
}
unsafe impl<I, O, S, Pl> AddCommand<commands_raw::CmdBindDescriptorSets<S, Pl>> for QueueTyCheckLayer<I>
    where I: CommandBufferBuilder + AddCommand<commands_raw::CmdBindDescriptorSets<S, Pl>, Out = O>
{
    type Out = QueueTyCheckLayer<O>;

    #[inline]
    fn add(self, cmd: commands_raw::CmdBindDescriptorSets<S, Pl>) -> Result<Self::Out, CommandAddError> {
        // Sets bound to the graphics bind point require a graphics-capable queue family;
        // sets bound to the compute bind point require a compute-capable one.
        let rejection = if cmd.is_graphics() {
            if self.supports_graphics() {
                None
            } else {
                Some(CommandAddError::GraphicsOperationsNotSupported)
            }
        } else {
            if self.supports_compute() {
                None
            } else {
                Some(CommandAddError::ComputeOperationsNotSupported)
            }
        };

        match rejection {
            Some(err) => Err(err),
            None => Ok(QueueTyCheckLayer { inner: self.inner.add(cmd)? }),
        }
    }
}
unsafe impl<I, O, C> AddCommand<commands_raw::CmdExecuteCommands<C>> for QueueTyCheckLayer<I>
    where I: CommandBufferBuilder + AddCommand<commands_raw::CmdExecuteCommands<C>, Out = O>,
          C: CommandBuffer
{
    type Out = QueueTyCheckLayer<O>;

    #[inline]
    fn add(self, command: commands_raw::CmdExecuteCommands<C>) -> Result<Self::Out, CommandAddError> {
        // Note that safety rules guarantee that the secondary command buffer belongs to the same
        // device as ourselves. Therefore this assert is only a debug assert.
        debug_assert_eq!(command.command_buffer().queue_family().physical_device().internal_object(),
                         self.queue_family().physical_device().internal_object());

        // A secondary command buffer can only be executed on a queue of the same family as the
        // one it was built for.
        if command.command_buffer().queue_family().id() != self.queue_family().id() {
            return Err(CommandAddError::QueueFamilyMismatch);
        }

        Ok(QueueTyCheckLayer {
            inner: self.inner.add(command)?,
        })
    }
}

View File

@ -1,266 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::CommandAddError;
use command_buffer::CommandBufferBuilder;
use command_buffer::commands_raw;
use command_buffer::DynamicState;
use device::Device;
use device::DeviceOwned;
use instance::QueueFamily;
use VulkanObject;
use vk;
/// Layer around a command buffer builder that caches the current state of the command buffer and
/// avoids redundant state changes.
///
/// For example if you add a command that sets the current vertex buffer, then later another
/// command that sets the current vertex buffer to the same value, then the second one will be
/// discarded by this layer.
///
/// As a general rule there's no reason not to use this layer unless you know that your commands
/// are already optimized in this regard.
///
/// # Safety
///
/// This layer expects that the commands passed to it all belong to the same device.
///
/// Since this layer can potentially optimize out some commands, a mismatch between devices could
/// potentially go undetected if it is checked in a lower layer.
pub struct StateCacheLayer<I> {
    // The inner builder that will actually execute the stuff.
    inner: I,
    // The dynamic state to synchronize with `CmdSetState`.
    dynamic_state: DynamicState,
    // The compute pipeline currently bound. 0 if nothing bound.
    compute_pipeline: vk::Pipeline,
    // The graphics pipeline currently bound. 0 if nothing bound.
    graphics_pipeline: vk::Pipeline,
    // The latest bind vertex buffers command.
    vertex_buffers: Option<commands_raw::CmdBindVertexBuffersHash>,
}

impl<I> StateCacheLayer<I> {
    /// Builds a new `StateCacheLayer`.
    ///
    /// It is safe to start caching at any point of the construction of a command buffer.
    #[inline]
    pub fn new(inner: I) -> StateCacheLayer<I> {
        StateCacheLayer {
            inner: inner,
            // Start with an empty cache: no pipeline bound, no dynamic state known.
            dynamic_state: DynamicState::none(),
            compute_pipeline: 0,
            graphics_pipeline: 0,
            vertex_buffers: None,
        }
    }

    /// Destroys the layer and returns the underlying command buffer.
    #[inline]
    pub fn into_inner(self) -> I {
        self.inner
    }
}

// Device and queue family queries are forwarded to the inner builder.
unsafe impl<I> DeviceOwned for StateCacheLayer<I>
    where I: DeviceOwned
{
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

unsafe impl<I> CommandBufferBuilder for StateCacheLayer<I>
    where I: CommandBufferBuilder
{
    #[inline]
    fn queue_family(&self) -> QueueFamily {
        self.inner.queue_family()
    }
}
unsafe impl<Pl, I, O> AddCommand<commands_raw::CmdBindPipeline<Pl>> for StateCacheLayer<I>
    where I: AddCommand<commands_raw::CmdBindPipeline<Pl>, Out = O>
{
    type Out = StateCacheLayer<O>;

    #[inline]
    fn add(mut self, command: commands_raw::CmdBindPipeline<Pl>) -> Result<Self::Out, CommandAddError> {
        let raw_pipeline = command.sys().internal_object();

        // If the same pipeline is already bound to the relevant bind point, turn the command
        // into a no-op (`disabled()`) instead of forwarding it as-is; otherwise remember the
        // new raw handle in the cache.
        let new_command = {
            if command.is_graphics() {
                if raw_pipeline == self.graphics_pipeline {
                    command.disabled()
                } else {
                    self.graphics_pipeline = raw_pipeline;
                    command
                }
            } else {
                if raw_pipeline == self.compute_pipeline {
                    command.disabled()
                } else {
                    self.compute_pipeline = raw_pipeline;
                    command
                }
            }
        };

        Ok(StateCacheLayer {
            inner: self.inner.add(new_command)?,
            // NOTE(review): the dynamic-state cache is cleared here instead of carrying over
            // `self.dynamic_state` — presumably because binding a pipeline can invalidate
            // previously-set dynamic state; confirm this reset is intentional.
            dynamic_state: DynamicState::none(),
            graphics_pipeline: self.graphics_pipeline,
            compute_pipeline: self.compute_pipeline,
            vertex_buffers: self.vertex_buffers,
        })
    }
}
unsafe impl<Cb, I, O> AddCommand<commands_raw::CmdExecuteCommands<Cb>> for StateCacheLayer<I>
    where I: AddCommand<commands_raw::CmdExecuteCommands<Cb>, Out = O>
{
    type Out = StateCacheLayer<O>;

    #[inline]
    fn add(self, command: commands_raw::CmdExecuteCommands<Cb>) -> Result<Self::Out, CommandAddError> {
        // After a secondary command buffer is added, all states are reset to the "unknown"
        // state, since the cache cannot know what the secondary command buffer did.
        let new_inner = self.inner.add(command)?;

        Ok(StateCacheLayer {
            inner: new_inner,
            dynamic_state: DynamicState::none(),
            compute_pipeline: 0,
            graphics_pipeline: 0,
            vertex_buffers: None,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdSetState> for StateCacheLayer<I>
    where I: AddCommand<commands_raw::CmdSetState, Out = O>
{
    type Out = StateCacheLayer<O>;

    #[inline]
    fn add(mut self, command: commands_raw::CmdSetState) -> Result<Self::Out, CommandAddError> {
        // We need to synchronize `self.dynamic_state` with the state in `command`.
        // While doing so, we tweak `command` to erase the states that are the same as what's
        // already in `self.dynamic_state`.

        let mut command_state = command.state().clone();

        // Handle line width.
        if let Some(new_val) = command_state.line_width {
            if self.dynamic_state.line_width == Some(new_val) {
                // Redundant: the cached line width is already `new_val`; erase it from the
                // forwarded command.
                command_state.line_width = None;
            } else {
                // New value: remember it in the cache and let it through.
                self.dynamic_state.line_width = Some(new_val);
            }
        }

        // TODO: missing implementations

        Ok(StateCacheLayer {
            // A fresh `CmdSetState` is rebuilt from the pruned state and forwarded.
            inner: self.inner.add(commands_raw::CmdSetState::new(command.device().clone(), command_state))?,
            dynamic_state: self.dynamic_state,
            graphics_pipeline: self.graphics_pipeline,
            compute_pipeline: self.compute_pipeline,
            vertex_buffers: self.vertex_buffers,
        })
    }
}
unsafe impl<I, O, B> AddCommand<commands_raw::CmdBindVertexBuffers<B>> for StateCacheLayer<I>
    where I: AddCommand<commands_raw::CmdBindVertexBuffers<B>, Out = O>
{
    type Out = StateCacheLayer<O>;

    #[inline]
    fn add(mut self, mut command: commands_raw::CmdBindVertexBuffers<B>)
           -> Result<Self::Out, CommandAddError>
    {
        // Compare the hash of this command's bindings with the latest bind-vertex-buffers
        // command seen.
        match &mut self.vertex_buffers {
            &mut Some(ref mut curr) => {
                if *curr != *command.hash() {
                    // Different bindings: narrow the command down to the bindings that
                    // actually changed (`diff`) and remember the new hash.
                    let new_hash = command.hash().clone();
                    command.diff(curr);
                    *curr = new_hash;
                }
            },
            curr @ &mut None => {
                // First bind-vertex-buffers command seen: just remember its hash.
                *curr = Some(command.hash().clone());
            }
        };

        Ok(StateCacheLayer {
            inner: self.inner.add(command)?,
            dynamic_state: self.dynamic_state,
            graphics_pipeline: self.graphics_pipeline,
            compute_pipeline: self.compute_pipeline,
            vertex_buffers: self.vertex_buffers,
        })
    }
}
// Building is delegated to the inner builder.
unsafe impl<I, O, E> CommandBufferBuild for StateCacheLayer<I>
    where I: CommandBufferBuild<Out = O, Err = E>
{
    type Out = O;
    type Err = E;

    #[inline]
    fn build(self) -> Result<O, E> {
        self.inner.build()
    }
}

// Commands that don't interact with the cache are forwarded unchanged, with the cached
// state carried over as-is.
macro_rules! pass_through {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for StateCacheLayer<I>
            where I: AddCommand<$cmd, Out = O>
        {
            type Out = StateCacheLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                Ok(StateCacheLayer {
                    inner: self.inner.add(command)?,
                    dynamic_state: self.dynamic_state,
                    graphics_pipeline: self.graphics_pipeline,
                    compute_pipeline: self.compute_pipeline,
                    vertex_buffers: self.vertex_buffers,
                })
            }
        }
    }
}

pass_through!((Rp, F), commands_raw::CmdBeginRenderPass<Rp, F>);
pass_through!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
pass_through!((B), commands_raw::CmdBindIndexBuffer<B>);
pass_through!((S, D), commands_raw::CmdBlitImage<S, D>);
pass_through!((), commands_raw::CmdClearAttachments);
pass_through!((S, D), commands_raw::CmdCopyBuffer<S, D>);
pass_through!((S, D), commands_raw::CmdCopyBufferToImage<S, D>);
pass_through!((S, D), commands_raw::CmdCopyImage<S, D>);
pass_through!((), commands_raw::CmdDispatchRaw);
pass_through!((), commands_raw::CmdDrawIndexedRaw);
pass_through!((B), commands_raw::CmdDrawIndirectRaw<B>);
pass_through!((), commands_raw::CmdDrawRaw);
pass_through!((), commands_raw::CmdEndRenderPass);
pass_through!((B), commands_raw::CmdFillBuffer<B>);
pass_through!((), commands_raw::CmdNextSubpass);
pass_through!((Pc, Pl), commands_raw::CmdPushConstants<Pc, Pl>);
pass_through!((S, D), commands_raw::CmdResolveImage<S, D>);
pass_through!((), commands_raw::CmdSetEvent);
pass_through!((B, D), commands_raw::CmdUpdateBuffer<B, D>);

View File

@ -1,869 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::collections::hash_map::Entry;
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use fnv::FnvHashMap;
use buffer::BufferAccess;
use command_buffer::cb::AddCommand;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::cb::UnsafeCommandBuffer;
use command_buffer::CommandAddError;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferBuilder;
use command_buffer::CommandBufferExecError;
use command_buffer::commands_raw;
use framebuffer::FramebufferAbstract;
use image::ImageLayout;
use image::ImageAccess;
use instance::QueueFamily;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use sync::AccessCheckError;
use sync::AccessError;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::GpuFuture;
/// Layers that ensures that synchronization with buffers and images between command buffers is
/// properly handled.
///
/// The following are handled:
///
/// - Return an error when submitting if the user didn't provide the guarantees for proper
/// synchronization.
///
/// - Automatically generate pipeline barriers between command buffers if necessary to handle
/// the transition between command buffers.
/// TODO: ^ this is not the case yet
///
pub struct SubmitSyncBuilderLayer<I> {
    // The wrapped builder.
    inner: I,
    // One entry per buffer or image used by the commands added so far.
    resources: FnvHashMap<Key, ResourceEntry>,
    // How image layouts are handled; see `SubmitSyncBuilderLayerBehavior`.
    behavior: SubmitSyncBuilderLayerBehavior,
}

/// How the layer behaves when it comes to image layouts.
#[derive(Debug, Copy, Clone)]
pub enum SubmitSyncBuilderLayerBehavior {
    /// When an image is added for the first time to the builder, the layer will suppose that the
    /// image is already in the layout that is required by this operation. When submitting the
    /// command buffer, the layer will then check whether it is truly the case.
    ///
    /// For example if you create a command buffer with an image copy operation with the
    /// TRANSFER_DEST layout, then when submitting the layer will make sure that the image is
    /// in the TRANSFER_DEST layout.
    Explicit,

    /// The layer will call the `ImageAccess::initial_layout_requirement()` and
    /// `ImageAccess::final_layout_requirement()` methods, and assume that images respectively
    /// enter and leave the builder in these two layouts.
    ///
    /// This supposes that an inner layer (that the submit sync layer is not aware of)
    /// automatically performs the required transition if necessary.
    ///
    /// For example if you create a command buffer with an image copy operation with the
    /// TRANSFER_DEST layout, then the submit sync layer will suppose that an inner layer
    /// automatically performs a transition from the layout returned by
    /// `initial_layout_requirement()` to the TRANSFER_DEST layout. When submitting the layer will
    /// make sure that the image is in the layout returned by `initial_layout_requirement()`.
    ///
    /// There is only one exception: if the layout of the first usage of the image is `Undefined`
    /// or `Preinitialized`, then the layer will not use the hint. This can only happen when
    /// entering a render pass, as it is the only command for which these layouts are legal (except
    /// for pipeline barriers which are not supported by this layer).
    ///
    /// > **Note**: The exception above is not an optimization. If the initial layout hint of an
    /// > image is a layout other than `Preinitialized`, and this image is used for the first time
    /// > as `Preinitialized`, then we have a problem. But since it is forbidden to perform a
    /// > transition *to* the `Preinitialized` layout (and it wouldn't make any sense to do so),
    /// > then there is no way to resolve this conflict in an inner layer. That's why we must
    /// > assume that the image is in the `Preinitialized` layout in the first place. When it
    /// > comes to `Undefined`, however, this is purely an optimization as it is possible to
    /// > "transition" to `Undefined` by not doing anything.
    UseLayoutHint,
}
// Key of the `resources` hash map. Note that `PartialEq`/`Hash` are implemented in
// terms of *conflicts* (see below), not identity, so that two handles referring to
// overlapping resources land in the same map slot.
enum Key {
    // A buffer used by one of the commands.
    Buffer(Box<BufferAccess + Send + Sync>),
    // An image used by one of the commands.
    Image(Box<ImageAccess + Send + Sync>),
    // Attachment `u32` of the given framebuffer, used when entering a render pass.
    FramebufferAttachment(Box<FramebufferAbstract + Send + Sync>, u32),
}
impl Key {
    /// Returns true if the resource designated by this key potentially overlaps
    /// with the given buffer.
    #[inline]
    fn conflicts_buffer_all(&self, buf: &BufferAccess) -> bool {
        match *self {
            Key::Buffer(ref a) => a.conflicts_buffer_all(buf),
            Key::Image(ref a) => a.conflicts_buffer_all(buf),
            Key::FramebufferAttachment(ref fb, idx) => {
                // Resolve the attachment to its parent image, then compare.
                fb.attachments()[idx as usize].parent().conflicts_buffer_all(buf)
            },
        }
    }

    /// Returns true if the resource designated by this key potentially overlaps
    /// with the given image.
    #[inline]
    fn conflicts_image_all(&self, img: &ImageAccess) -> bool {
        match *self {
            Key::Buffer(ref a) => a.conflicts_image_all(img),
            Key::Image(ref a) => a.conflicts_image_all(img),
            Key::FramebufferAttachment(ref fb, idx) => {
                // Resolve the attachment to its parent image, then compare.
                fb.attachments()[idx as usize].parent().conflicts_image_all(img)
            },
        }
    }
}
impl PartialEq for Key {
    /// Two keys are "equal" when the resources they designate conflict with each
    /// other; combined with the `Hash` impl below, this makes conflicting
    /// resources collide in the hash map.
    #[inline]
    fn eq(&self, other: &Key) -> bool {
        match *other {
            Key::Buffer(ref b) => self.conflicts_buffer_all(b),
            Key::Image(ref i) => self.conflicts_image_all(i),
            Key::FramebufferAttachment(ref fb, idx) => {
                let img = fb.attachments()[idx as usize].parent();
                self.conflicts_image_all(img)
            },
        }
    }
}
// `eq` above is based on conflicts, which is reflexive for a resource with itself,
// so declaring full equivalence is sound for our usage.
impl Eq for Key {
}
impl Hash for Key {
    /// Hashes the resource's conflict key, so that resources that may conflict
    /// produce the same hash (consistent with the `PartialEq` impl).
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        let conflict_key = match *self {
            Key::Buffer(ref buf) => buf.conflict_key_all(),
            Key::Image(ref img) => img.conflict_key_all(),
            Key::FramebufferAttachment(ref fb, idx) => {
                fb.attachments()[idx as usize].parent().conflict_key_all()
            },
        };
        conflict_key.hash(state)
    }
}
// Accumulated information about one resource across all the commands added so far.
struct ResourceEntry {
    // Union of the pipeline stages of every command that uses the resource.
    final_stages: PipelineStages,
    // Union of the access types of every command that uses the resource.
    final_access: AccessFlagBits,
    // True if any command needs exclusive (write) access to the resource.
    exclusive: bool,
    // Layout the image is expected to be in when the command buffer starts.
    // `Undefined` for buffers.
    initial_layout: ImageLayout,
    // Layout the image is left in when the command buffer ends. `Undefined` for buffers.
    final_layout: ImageLayout,
}
impl<I> SubmitSyncBuilderLayer<I> {
    /// Builds a new layer that wraps around an existing builder.
    #[inline]
    pub fn new(inner: I, behavior: SubmitSyncBuilderLayerBehavior) -> SubmitSyncBuilderLayer<I> {
        SubmitSyncBuilderLayer {
            inner: inner,
            resources: FnvHashMap::default(),
            behavior: behavior,
        }
    }

    // Adds a buffer to the list.
    //
    // `exclusive` is true if the command writes the buffer; `stages`/`access`
    // describe how the command uses it. If the buffer was already registered,
    // the new usage is merged into the existing entry.
    fn add_buffer<B>(&mut self, buffer: &B, exclusive: bool, stages: PipelineStages,
                     access: AccessFlagBits)
        where B: BufferAccess + Send + Sync + Clone + 'static
    {
        // TODO: don't create the key every time ; https://github.com/rust-lang/rfcs/pull/1769
        let key = Key::Buffer(Box::new(buffer.clone()));
        match self.resources.entry(key) {
            Entry::Vacant(entry) => {
                entry.insert(ResourceEntry {
                    final_stages: stages,
                    final_access: access,
                    exclusive: exclusive,
                    // Buffers have no image layout; `Undefined` acts as a placeholder.
                    initial_layout: ImageLayout::Undefined,
                    final_layout: ImageLayout::Undefined,
                });
            },

            Entry::Occupied(mut entry) => {
                let entry = entry.get_mut();
                // TODO: remove some stages and accesses when there's an "overflow"?
                entry.final_stages = entry.final_stages | stages;
                entry.final_access = entry.final_access | access;
                entry.exclusive = entry.exclusive || exclusive;
                entry.final_layout = ImageLayout::Undefined;
            },
        }
    }

    // Adds an image to the list.
    //
    // Same merging rules as `add_buffer`, but the expected initial/final layouts
    // are derived from the layer's `behavior`.
    fn add_image<T>(&mut self, image: &T, exclusive: bool, stages: PipelineStages,
                    access: AccessFlagBits)
        where T: ImageAccess + Send + Sync + Clone + 'static
    {
        let key = Key::Image(Box::new(image.clone()));

        let initial_layout = match self.behavior {
            SubmitSyncBuilderLayerBehavior::Explicit => unimplemented!(),       // FIXME:
            SubmitSyncBuilderLayerBehavior::UseLayoutHint => image.initial_layout_requirement(),
        };

        let final_layout = match self.behavior {
            SubmitSyncBuilderLayerBehavior::Explicit => unimplemented!(),       // FIXME:
            SubmitSyncBuilderLayerBehavior::UseLayoutHint => image.final_layout_requirement(),
        };

        match self.resources.entry(key) {
            Entry::Vacant(entry) => {
                entry.insert(ResourceEntry {
                    final_stages: stages,
                    final_access: access,
                    exclusive: exclusive,
                    initial_layout: initial_layout,
                    final_layout: final_layout,
                });
            },

            Entry::Occupied(mut entry) => {
                let entry = entry.get_mut();
                // TODO: exclusive accss if transition required?
                entry.exclusive = entry.exclusive || exclusive;
                // TODO: remove some stages and accesses when there's an "overflow"?
                entry.final_stages = entry.final_stages | stages;
                entry.final_access = entry.final_access | access;
                // The latest usage determines the layout the image is left in.
                entry.final_layout = final_layout;
            },
        }
    }

    // Adds a framebuffer to the list.
    //
    // Registers every attachment of the framebuffer as a resource, with layouts
    // taken from the render pass attachment descriptions (possibly overridden by
    // the image's layout hints, depending on `behavior`).
    fn add_framebuffer<F>(&mut self, framebuffer: &F)
        where F: FramebufferAbstract + Send + Sync + Clone + 'static
    {
        // TODO: slow
        for index in 0 .. FramebufferAbstract::attachments(framebuffer).len() {
            let key = Key::FramebufferAttachment(Box::new(framebuffer.clone()), index as u32);
            let desc = framebuffer.attachment_desc(index).expect("Wrong implementation of FramebufferAbstract trait");
            let image = FramebufferAbstract::attachments(framebuffer)[index];

            let initial_layout = match self.behavior {
                SubmitSyncBuilderLayerBehavior::Explicit => desc.initial_layout,
                SubmitSyncBuilderLayerBehavior::UseLayoutHint => {
                    match desc.initial_layout {
                        // See the docs of `UseLayoutHint`: `Undefined`/`Preinitialized`
                        // must not be replaced by the hint.
                        ImageLayout::Undefined | ImageLayout::Preinitialized => desc.initial_layout,
                        _ => image.parent().initial_layout_requirement(),
                    }
                },
            };

            let final_layout = match self.behavior {
                SubmitSyncBuilderLayerBehavior::Explicit => desc.final_layout,
                SubmitSyncBuilderLayerBehavior::UseLayoutHint => {
                    match desc.final_layout {
                        ImageLayout::Undefined | ImageLayout::Preinitialized => desc.final_layout,
                        _ => image.parent().final_layout_requirement(),
                    }
                },
            };

            match self.resources.entry(key) {
                Entry::Vacant(entry) => {
                    entry.insert(ResourceEntry {
                        final_stages: PipelineStages { all_commands: true, ..PipelineStages::none() },     // FIXME:
                        final_access: AccessFlagBits::all(),        // FIXME:
                        exclusive: true,            // FIXME:
                        initial_layout: initial_layout,
                        final_layout: final_layout,
                    });
                },

                Entry::Occupied(mut entry) => {
                    let entry = entry.get_mut();
                    // TODO: update stages and access
                    entry.exclusive = true;         // FIXME:
                    entry.final_layout = final_layout;
                },
            }
        }
    }
}
unsafe impl<I, O, E> CommandBufferBuild for SubmitSyncBuilderLayer<I>
    where I: CommandBufferBuild<Out = O, Err = E>
{
    type Out = SubmitSyncLayer<O>;
    type Err = E;

    /// Builds the inner command buffer and wraps it in a `SubmitSyncLayer`,
    /// carrying over the tracked resources so they can be checked and locked
    /// at submit time.
    #[inline]
    fn build(self) -> Result<Self::Out, E> {
        Ok(SubmitSyncLayer {
            // `?` instead of `try!` for consistency with the rest of the file.
            inner: self.inner.build()?,
            resources: self.resources,
        })
    }
}
// The layer belongs to the same device as the builder it wraps.
unsafe impl<I> DeviceOwned for SubmitSyncBuilderLayer<I>
    where I: DeviceOwned
{
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}
// Delegates the queue family to the wrapped builder.
unsafe impl<I> CommandBufferBuilder for SubmitSyncBuilderLayer<I>
    where I: CommandBufferBuilder
{
    #[inline]
    fn queue_family(&self) -> QueueFamily {
        self.inner.queue_family()
    }
}
// FIXME: implement manually
// Generates a trivial `AddCommand` impl that forwards the command to the inner
// builder without any resource tracking. Used for commands whose resources are
// not (yet) synchronized by this layer.
macro_rules! pass_through {
    (($($param:ident),*), $cmd:ty) => {
        unsafe impl<'a, I, O $(, $param)*> AddCommand<$cmd> for SubmitSyncBuilderLayer<I>
            where I: AddCommand<$cmd, Out = O>
        {
            type Out = SubmitSyncBuilderLayer<O>;

            #[inline]
            fn add(self, command: $cmd) -> Result<Self::Out, CommandAddError> {
                Ok(SubmitSyncBuilderLayer {
                    inner: AddCommand::add(self.inner, command)?,
                    resources: self.resources,
                    behavior: self.behavior,
                })
            }
        }
    }
}
// FIXME: implement manually
// These commands are forwarded without tracking the resources they touch
// (descriptor sets, vertex buffers, secondary command buffers).
pass_through!((S, Pl), commands_raw::CmdBindDescriptorSets<S, Pl>);
pass_through!((V), commands_raw::CmdBindVertexBuffers<V>);
pass_through!((C), commands_raw::CmdExecuteCommands<C>);
unsafe impl<I, O, Rp, F> AddCommand<commands_raw::CmdBeginRenderPass<Rp, F>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdBeginRenderPass<Rp, F>, Out = O>,
          F: FramebufferAbstract + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers every attachment of the framebuffer, then forwards the command
    /// to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdBeginRenderPass<Rp, F>) -> Result<Self::Out, CommandAddError> {
        self.add_framebuffer(command.framebuffer());

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, B> AddCommand<commands_raw::CmdBindIndexBuffer<B>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdBindIndexBuffer<B>, Out = O>,
          B: BufferAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the index buffer as read by the vertex-input stage, then
    /// forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdBindIndexBuffer<B>) -> Result<Self::Out, CommandAddError> {
        let stages = PipelineStages { vertex_input: true, .. PipelineStages::none() };
        let access = AccessFlagBits { index_read: true, .. AccessFlagBits::none() };
        self.add_buffer(command.buffer(), false, stages, access);

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, P> AddCommand<commands_raw::CmdBindPipeline<P>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdBindPipeline<P>, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Binding a pipeline touches no buffer or image, so the command is simply
    /// forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdBindPipeline<P>) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, S, D> AddCommand<commands_raw::CmdBlitImage<S, D>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdBlitImage<S, D>, Out = O>,
          S: ImageAccess + Send + Sync + Clone + 'static,
          D: ImageAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the source as read and the destination as written by the
    /// transfer stage, then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdBlitImage<S, D>) -> Result<Self::Out, CommandAddError> {
        let transfer = PipelineStages { transfer: true, .. PipelineStages::none() };
        self.add_image(command.source(), false, transfer,
                       AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
        self.add_image(command.destination(), true, transfer,
                       AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdClearAttachments> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdClearAttachments, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// The attachments are already registered when the render pass is entered,
    /// so the command is simply forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdClearAttachments) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, S, D> AddCommand<commands_raw::CmdCopyBuffer<S, D>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdCopyBuffer<S, D>, Out = O>,
          S: BufferAccess + Send + Sync + Clone + 'static,
          D: BufferAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the source buffer as read and the destination buffer as written
    /// by the transfer stage, then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdCopyBuffer<S, D>) -> Result<Self::Out, CommandAddError> {
        let transfer = PipelineStages { transfer: true, .. PipelineStages::none() };
        self.add_buffer(command.source(), false, transfer,
                        AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
        self.add_buffer(command.destination(), true, transfer,
                        AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, S, D> AddCommand<commands_raw::CmdCopyBufferToImage<S, D>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdCopyBufferToImage<S, D>, Out = O>,
          S: BufferAccess + Send + Sync + Clone + 'static,
          D: ImageAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the source buffer as read and the destination image as written
    /// by the transfer stage, then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdCopyBufferToImage<S, D>) -> Result<Self::Out, CommandAddError> {
        let transfer = PipelineStages { transfer: true, .. PipelineStages::none() };
        self.add_buffer(command.source(), false, transfer,
                        AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
        self.add_image(command.destination(), true, transfer,
                       AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, S, D> AddCommand<commands_raw::CmdCopyImage<S, D>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdCopyImage<S, D>, Out = O>,
          S: ImageAccess + Send + Sync + Clone + 'static,
          D: ImageAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the source image as read and the destination image as written
    /// by the transfer stage, then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdCopyImage<S, D>) -> Result<Self::Out, CommandAddError> {
        let transfer = PipelineStages { transfer: true, .. PipelineStages::none() };
        self.add_image(command.source(), false, transfer,
                       AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
        self.add_image(command.destination(), true, transfer,
                       AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdDispatchRaw> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdDispatchRaw, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// A raw dispatch carries no resource information of its own (resources come
    /// from the bound descriptor sets), so the command is simply forwarded.
    #[inline]
    fn add(self, command: commands_raw::CmdDispatchRaw) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdDrawRaw> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdDrawRaw, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// A raw draw carries no resource information of its own, so the command is
    /// simply forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdDrawRaw) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdDrawIndexedRaw> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdDrawIndexedRaw, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// The index buffer is registered when it is bound, so a raw indexed draw is
    /// simply forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdDrawIndexedRaw) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, B> AddCommand<commands_raw::CmdDrawIndirectRaw<B>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdDrawIndirectRaw<B>, Out = O>,
          B: BufferAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the indirect buffer as read by the draw-indirect stage, then
    /// forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdDrawIndirectRaw<B>) -> Result<Self::Out, CommandAddError> {
        let stages = PipelineStages { draw_indirect: true, .. PipelineStages::none() };
        let access = AccessFlagBits { indirect_command_read: true, .. AccessFlagBits::none() };
        self.add_buffer(command.buffer(), false, stages, access);

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdEndRenderPass> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdEndRenderPass, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Ending a render pass introduces no new resources; the command is simply
    /// forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdEndRenderPass) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, B> AddCommand<commands_raw::CmdFillBuffer<B>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdFillBuffer<B>, Out = O>,
          B: BufferAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the buffer as written by the transfer stage (exclusive access),
    /// then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdFillBuffer<B>) -> Result<Self::Out, CommandAddError> {
        let stages = PipelineStages { transfer: true, .. PipelineStages::none() };
        let access = AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() };
        self.add_buffer(command.buffer(), true, stages, access);

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdNextSubpass> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdNextSubpass, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Advancing to the next subpass introduces no new resources; the command is
    /// simply forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdNextSubpass) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, Pc, Pl> AddCommand<commands_raw::CmdPushConstants<Pc, Pl>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdPushConstants<Pc, Pl>, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Push constants touch no buffer or image; the command is simply forwarded
    /// to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdPushConstants<Pc, Pl>) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, S, D> AddCommand<commands_raw::CmdResolveImage<S, D>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdResolveImage<S, D>, Out = O>,
          S: ImageAccess + Send + Sync + Clone + 'static,
          D: ImageAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the source image as read and the destination image as written
    /// by the transfer stage, then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdResolveImage<S, D>) -> Result<Self::Out, CommandAddError> {
        // Bug fix: the source was previously registered with `transfer_write`.
        // A resolve only *reads* its source (hence `exclusive: false`), so the
        // correct access flag is `transfer_read` — matching `CmdBlitImage` and
        // `CmdCopyImage` above.
        self.add_image(command.source(), false,
                       PipelineStages { transfer: true, .. PipelineStages::none() },
                       AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
        self.add_image(command.destination(), true,
                       PipelineStages { transfer: true, .. PipelineStages::none() },
                       AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(self.inner, command)?,
            resources: self.resources,
            behavior: self.behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdSetEvent> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdSetEvent, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Setting an event touches no buffer or image; the command is simply
    /// forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdSetEvent) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O> AddCommand<commands_raw::CmdSetState> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdSetState, Out = O>
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Dynamic state changes touch no buffer or image; the command is simply
    /// forwarded to the inner builder.
    #[inline]
    fn add(self, command: commands_raw::CmdSetState) -> Result<Self::Out, CommandAddError> {
        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
unsafe impl<I, O, B, D> AddCommand<commands_raw::CmdUpdateBuffer<B, D>> for SubmitSyncBuilderLayer<I>
    where I: AddCommand<commands_raw::CmdUpdateBuffer<B, D>, Out = O>,
          B: BufferAccess + Send + Sync + Clone + 'static
{
    type Out = SubmitSyncBuilderLayer<O>;

    /// Registers the buffer as written by the transfer stage (exclusive access),
    /// then forwards the command to the inner builder.
    #[inline]
    fn add(mut self, command: commands_raw::CmdUpdateBuffer<B, D>) -> Result<Self::Out, CommandAddError> {
        let stages = PipelineStages { transfer: true, .. PipelineStages::none() };
        let access = AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() };
        self.add_buffer(command.buffer(), true, stages, access);

        let SubmitSyncBuilderLayer { inner, resources, behavior } = self;
        Ok(SubmitSyncBuilderLayer {
            inner: AddCommand::add(inner, command)?,
            resources: resources,
            behavior: behavior,
        })
    }
}
/// Layer around a command buffer that handles synchronization between command buffers.
pub struct SubmitSyncLayer<I> {
    // The built command buffer being wrapped.
    inner: I,
    // Resources recorded by the builder layer; checked and locked in `prepare_submit`.
    resources: FnvHashMap<Key, ResourceEntry>,
}
unsafe impl<I> CommandBuffer for SubmitSyncLayer<I> where I: CommandBuffer {
    type Pool = I::Pool;

    #[inline]
    fn inner(&self) -> &UnsafeCommandBuffer<I::Pool> {
        self.inner.inner()
    }

    // Checks that every tracked resource is either already accessible through
    // `future`, or can be locked directly on the resource itself. On success the
    // GPU lock of each resource is taken (or its refcount increased).
    fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
        // TODO: if at any point we return an error, we can't recover

        for (key, entry) in self.resources.iter() {
            match key {
                &Key::Buffer(ref buf) => {
                    // First ask the future; if it already grants access, just
                    // bump the lock count and move on.
                    let prev_err = match future.check_buffer_access(&buf, entry.exclusive, queue) {
                        Ok(_) => {
                            unsafe { buf.increase_gpu_lock(); }
                            continue;
                        },
                        Err(err) => err
                    };

                    // Otherwise try locking the buffer itself. An `Unknown`
                    // answer from the future defers to the lock's own error.
                    match (buf.try_gpu_lock(entry.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
                        (_, AccessCheckError::Denied(err)) => return Err(err.into()),
                    }
                },

                &Key::Image(ref img) => {
                    // Same scheme as buffers, with the expected initial layout
                    // checked as well.
                    let prev_err = match future.check_image_access(img, entry.initial_layout,
                                                                   entry.exclusive, queue)
                    {
                        Ok(_) => {
                            unsafe { img.increase_gpu_lock(); }
                            continue;
                        },
                        Err(err) => err
                    };

                    match (img.try_gpu_lock(entry.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
                        (_, AccessCheckError::Denied(err)) => return Err(err.into()),
                    }
                },

                &Key::FramebufferAttachment(ref fb, idx) => {
                    let img = fb.attachments()[idx as usize].parent();

                    let prev_err = match future.check_image_access(img, entry.initial_layout,
                                                                   entry.exclusive, queue)
                    {
                        Ok(_) => {
                            unsafe { img.increase_gpu_lock(); }
                            continue;
                        },
                        Err(err) => err
                    };

                    // FIXME: this is bad because dropping the submit sync layer doesn't drop the
                    //        attachments of the framebuffer, meaning that they will stay locked
                    match (img.try_gpu_lock(entry.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
                        (_, AccessCheckError::Denied(err)) => return Err(err.into()),
                    }
                },
            }
        }

        // FIXME: pipeline barriers if necessary?

        Ok(())
    }

    // Answers whether a later user may access `buffer` after this command buffer,
    // based on the recorded resource entries.
    #[inline]
    fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
                           -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        // TODO: check the queue family

        // We can't call `.get()` on the HashMap because of the `Borrow` requirement that's
        // unimplementable on our key type.
        // TODO:

        for (key, value) in self.resources.iter() {
            if !key.conflicts_buffer_all(buffer) {
                continue;
            }

            // An exclusive request can only be granted if we recorded an
            // exclusive access ourselves.
            if !value.exclusive && exclusive {
                return Err(AccessCheckError::Denied(AccessError::ExclusiveDenied));
            }

            return Ok(Some((value.final_stages, value.final_access)));
        }

        Err(AccessCheckError::Unknown)
    }

    // Same as `check_buffer_access`, additionally validating the requested
    // image layout against the layout the image is left in.
    #[inline]
    fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
                          -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        // TODO: check the queue family

        // We can't call `.get()` on the HashMap because of the `Borrow` requirement that's
        // unimplementable on our key type.
        // TODO:

        for (key, value) in self.resources.iter() {
            if !key.conflicts_image_all(image) {
                continue;
            }

            if layout != ImageLayout::Undefined && value.final_layout != layout {
                return Err(AccessCheckError::Denied(AccessError::UnexpectedImageLayout {
                    allowed: value.final_layout,
                    requested: layout,
                }));
            }

            if !value.exclusive && exclusive {
                return Err(AccessCheckError::Denied(AccessError::ExclusiveDenied));
            }

            return Ok(Some((value.final_stages, value.final_access)));
        }

        Err(AccessCheckError::Unknown)
    }
}
// The layer belongs to the same device as the command buffer it wraps.
unsafe impl<I> DeviceOwned for SubmitSyncLayer<I> where I: DeviceOwned {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

View File

@ -1,340 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::ptr;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use buffer::BufferAccess;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferBuilder;
use command_buffer::CommandBufferExecError;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::CommandPoolAlloc;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use framebuffer::EmptySinglePassRenderPassDesc;
use framebuffer::Framebuffer;
use framebuffer::FramebufferAbstract;
use framebuffer::RenderPass;
use framebuffer::RenderPassAbstract;
use framebuffer::Subpass;
use image::ImageLayout;
use image::ImageAccess;
use instance::QueueFamily;
use sync::AccessCheckError;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::GpuFuture;
use OomError;
use VulkanObject;
use check_errors;
use vk;
/// Determines the kind of command buffer that we want to create.
#[derive(Debug, Clone)]
pub enum Kind<R, F> {
    /// A primary command buffer can execute all commands and can call secondary command buffers.
    Primary,

    /// A secondary command buffer can execute all dispatch and transfer operations, but not
    /// drawing operations.
    Secondary,

    /// A secondary command buffer within a render pass can only call draw operations that can
    /// be executed from within a specific subpass.
    SecondaryRenderPass {
        /// Which subpass this secondary command buffer can be called from.
        subpass: Subpass<R>,
        /// The framebuffer object that will be used when calling the command buffer.
        /// This parameter is optional and is an optimization hint for the implementation.
        framebuffer: Option<F>,
    },
}
impl Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
    /// Equivalent to `Kind::Primary`.
    ///
    /// > **Note**: If you use `let kind = Kind::Primary;` in your code, you will probably get a
    /// > compilation error because the Rust compiler couldn't determine the template parameters
    /// > of `Kind`. To solve that problem in an easy way you can use this function instead.
    #[inline]
    pub fn primary() -> Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
        // The concrete (but irrelevant) type parameters make type inference succeed.
        Kind::Primary
    }
}
/// Flags to pass when creating a command buffer.
///
/// The safest option is `SimultaneousUse`, but it may be slower than the other two.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Flags {
    /// The command buffer can be used multiple times, but must not execute more than once
    /// simultaneously.
    None,

    /// The command buffer can be executed multiple times in parallel.
    SimultaneousUse,

    /// The command buffer can only be submitted once. Any further submit is forbidden.
    OneTimeSubmit,
}
/// Command buffer being built.
///
/// You can add commands to an `UnsafeCommandBufferBuilder` by using the `AddCommand` trait.
/// The `AddCommand<&Cmd>` trait is implemented on the `UnsafeCommandBufferBuilder` for any `Cmd`
/// that is a raw Vulkan command.
///
/// When you are finished adding commands, you can use the `CommandBufferBuild` trait to turn this
/// builder into an `UnsafeCommandBuffer`.
// TODO: change P parameter to be a CommandPoolBuilderAlloc
pub struct UnsafeCommandBufferBuilder<P> where P: CommandPool {
    // The command buffer obtained from the pool. Contains `None` if `build()` has been called.
    cmd: Option<P::Builder>,

    // Device that owns the command buffer.
    // TODO: necessary?
    device: Arc<Device>,

    // Flags that were used at creation.
    // TODO: necessary?
    flags: Flags,

    // True if we are a secondary command buffer.
    // TODO: necessary?
    secondary_cb: bool,
}
impl<P> UnsafeCommandBufferBuilder<P> where P: CommandPool {
    /// Creates a new builder.
    ///
    /// # Safety
    ///
    /// Creating and destroying an unsafe command buffer is not unsafe per se, but the commands
    /// that you add to it are unchecked, do not have any synchronization, and are not kept alive.
    ///
    /// In other words, it is your job to make sure that the commands you add are valid, that they
    /// don't use resources that have been destroyed, and that they do not introduce any race
    /// condition.
    ///
    /// > **Note**: Some checks are still made with `debug_assert!`. Do not expect to be able to
    /// > submit invalid commands.
    pub unsafe fn new<R, F>(pool: &P, kind: Kind<R, F>, flags: Flags)
                            -> Result<UnsafeCommandBufferBuilder<P>, OomError>
        where R: RenderPassAbstract, F: FramebufferAbstract
    {
        let secondary = match kind {
            Kind::Primary => false,
            Kind::Secondary | Kind::SecondaryRenderPass { .. } => true,
        };

        // Allocate exactly one command buffer from the pool, then delegate the
        // actual `vkBeginCommandBuffer` to `already_allocated`.
        let cmd = try!(pool.alloc(secondary, 1)).next().expect("Requested one command buffer from \
                                                                the command pool, but got zero.");
        UnsafeCommandBufferBuilder::already_allocated(cmd, kind, flags)
    }

    /// Creates a new command buffer builder from an already-allocated command buffer.
    ///
    /// # Safety
    ///
    /// See the `new` method.
    ///
    /// The kind must match how the command buffer was allocated.
    ///
    pub unsafe fn already_allocated<R, F>(alloc: P::Builder, kind: Kind<R, F>, flags: Flags)
                                          -> Result<UnsafeCommandBufferBuilder<P>, OomError>
        where R: RenderPassAbstract, F: FramebufferAbstract
    {
        let device = alloc.device().clone();
        let vk = device.pointers();
        let cmd = alloc.inner().internal_object();

        // Translate our `Flags` and `Kind` into Vulkan usage flags.
        let vk_flags = {
            let a = match flags {
                Flags::None => 0,
                Flags::SimultaneousUse => vk::COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
                Flags::OneTimeSubmit => vk::COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
            };

            let b = match kind {
                Kind::Primary | Kind::Secondary => 0,
                Kind::SecondaryRenderPass { .. } => {
                    vk::COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
                },
            };

            a | b
        };

        // Render pass and subpass index for the inheritance info; zero (null
        // handle) when not a secondary-within-render-pass command buffer.
        let (rp, sp) = if let Kind::SecondaryRenderPass { ref subpass, .. } = kind {
            (subpass.render_pass().inner().internal_object(), subpass.index())
        } else {
            (0, 0)
        };

        // Optional framebuffer hint; zero (null handle) when absent.
        let framebuffer = if let Kind::SecondaryRenderPass { ref subpass, framebuffer: Some(ref framebuffer) } = kind {
            // TODO: restore check
            //assert!(framebuffer.is_compatible_with(subpass.render_pass()));     // TODO: proper error
            FramebufferAbstract::inner(&framebuffer).internal_object()
        } else {
            0
        };

        let inheritance = vk::CommandBufferInheritanceInfo {
            sType: vk::STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
            pNext: ptr::null(),
            renderPass: rp,
            subpass: sp,
            framebuffer: framebuffer,
            occlusionQueryEnable: 0,            // TODO:
            queryFlags: 0,          // TODO:
            pipelineStatistics: 0,          // TODO:
        };

        let infos = vk::CommandBufferBeginInfo {
            sType: vk::STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
            pNext: ptr::null(),
            flags: vk_flags,
            pInheritanceInfo: &inheritance,
        };

        // Put the command buffer in the recording state.
        try!(check_errors(vk.BeginCommandBuffer(cmd, &infos)));

        Ok(UnsafeCommandBufferBuilder {
            cmd: Some(alloc),
            device: device.clone(),
            flags: flags,
            secondary_cb: match kind {
                Kind::Primary => false,
                Kind::Secondary | Kind::SecondaryRenderPass { .. } => true,
            },
        })
    }
}
// The builder belongs to the device the command buffer was allocated from.
unsafe impl<P> DeviceOwned for UnsafeCommandBufferBuilder<P> where P: CommandPool {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
unsafe impl<P> CommandBufferBuilder for UnsafeCommandBufferBuilder<P> where P: CommandPool {
    /// Returns the queue family of the pool the command buffer was allocated from.
    #[inline]
    fn queue_family(&self) -> QueueFamily {
        // `cmd` is only `None` after `build()` has taken the allocation, so this unwrap
        // cannot fire while the builder is still usable.
        self.cmd.as_ref().unwrap().queue_family()
    }
}
unsafe impl<P> VulkanObject for UnsafeCommandBufferBuilder<P> where P: CommandPool {
    type Object = vk::CommandBuffer;

    /// Returns the raw Vulkan handle of the command buffer being recorded.
    #[inline]
    fn internal_object(&self) -> vk::CommandBuffer {
        // `cmd` is only `None` after `build()` has taken the allocation.
        self.cmd.as_ref().unwrap().inner().internal_object()
    }
}
unsafe impl<P> CommandBufferBuild for UnsafeCommandBufferBuilder<P>
where P: CommandPool
{
type Out = UnsafeCommandBuffer<P>;
type Err = OomError;
#[inline]
fn build(mut self) -> Result<Self::Out, OomError> {
unsafe {
let cmd = self.cmd.take().unwrap();
let vk = self.device.pointers();
try!(check_errors(vk.EndCommandBuffer(cmd.inner().internal_object())));
Ok(UnsafeCommandBuffer {
cmd: cmd.into_alloc(),
device: self.device.clone(),
flags: self.flags,
already_submitted: AtomicBool::new(false),
secondary_cb: self.secondary_cb
})
}
}
}
/// Command buffer that has been built.
///
/// Doesn't perform any synchronization and doesn't keep the object it uses alive.
// TODO: change P parameter to be a CommandPoolAlloc
pub struct UnsafeCommandBuffer<P> where P: CommandPool {
    // The Vulkan command buffer allocation.
    cmd: P::Alloc,

    // Device that owns the command buffer.
    // TODO: necessary?
    device: Arc<Device>,

    // Flags that were used at creation.
    // TODO: necessary?
    flags: Flags,

    // True if the command buffer has already been submitted once. Only relevant if `flags` is
    // `OneTimeSubmit`.
    already_submitted: AtomicBool,

    // True if this command buffer belongs to a secondary pool - needed for Drop
    secondary_cb: bool
}
unsafe impl<P> CommandBuffer for UnsafeCommandBuffer<P> where P: CommandPool {
    type Pool = P;

    /// An `UnsafeCommandBuffer` is the lowest layer, so it is its own inner command buffer.
    #[inline]
    fn inner(&self) -> &UnsafeCommandBuffer<P> {
        self
    }

    #[inline]
    fn prepare_submit(&self, _: &GpuFuture, _: &Queue) -> Result<(), CommandBufferExecError> {
        // Not our job to check.
        Ok(())
    }

    /// The unsafe layer doesn't track resources, so it can never answer access queries.
    /// Returning `Unknown` lets the caller query other layers instead.
    // Parameters are underscore-prefixed (like in `prepare_submit`) to avoid
    // unused-variable warnings.
    #[inline]
    fn check_buffer_access(&self, _buffer: &BufferAccess, _exclusive: bool, _queue: &Queue)
                           -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        Err(AccessCheckError::Unknown)
    }

    /// See `check_buffer_access`: always `Unknown` because nothing is tracked here.
    #[inline]
    fn check_image_access(&self, _image: &ImageAccess, _layout: ImageLayout, _exclusive: bool,
                          _queue: &Queue)
                          -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        Err(AccessCheckError::Unknown)
    }
}
unsafe impl<P> DeviceOwned for UnsafeCommandBuffer<P> where P: CommandPool {
    /// Returns the device that owns the command buffer.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
unsafe impl<P> VulkanObject for UnsafeCommandBuffer<P> where P: CommandPool {
    type Object = vk::CommandBuffer;

    /// Returns the raw Vulkan handle of the built command buffer.
    #[inline]
    fn internal_object(&self) -> vk::CommandBuffer {
        self.cmd.inner().internal_object()
    }
}

View File

@ -1,20 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use command_buffer::CommandAddError;
/// Adds a command to a command buffer builder.
// NOTE(review): the trait is `unsafe`, but the contract implementors must uphold is not
// documented here — presumably builder-state invariants; confirm before implementing.
pub unsafe trait AddCommand<C> {
    /// The new command buffer builder type.
    type Out;

    /// Adds the command. This takes ownership of the builder and returns a new builder with the
    /// command appended at the end of it.
    fn add(self, cmd: C) -> Result<Self::Out, CommandAddError>;
}

View File

@ -75,9 +75,9 @@
//! information.
pub use self::auto::AutoCommandBufferBuilder;
pub use self::builder::CommandAddError;
pub use self::builder::CommandBufferBuilder;
pub use self::builder::CommandBufferBuilderError;
pub use self::auto::AutoCommandBuffer;
pub use self::state_cacher::StateCacher;
pub use self::state_cacher::StateCacherOutcome;
pub use self::traits::CommandBuffer;
pub use self::traits::CommandBufferBuild;
pub use self::traits::CommandBufferExecError;
@ -86,14 +86,14 @@ pub use self::traits::CommandBufferExecFuture;
use pipeline::viewport::Viewport;
use pipeline::viewport::Scissor;
pub mod cb;
pub mod commands_extra;
pub mod commands_raw;
pub mod pool;
pub mod submit;
pub mod synced;
pub mod sys;
pub mod validity;
mod auto;
mod builder;
mod state_cacher;
mod traits;
#[repr(C)]
@ -124,11 +124,13 @@ pub struct DispatchIndirectCommand {
}
/// The dynamic state to use for a draw command.
// TODO: probably not the right location
#[derive(Debug, Clone)]
pub struct DynamicState {
    // NOTE(review): for each field, `None` presumably means "don't set this state
    // dynamically" — confirm against the code that consumes `DynamicState`.
    pub line_width: Option<f32>,
    pub viewports: Option<Vec<Viewport>>,
    pub scissors: Option<Vec<Scissor>>,
    // TODO: missing fields
}
impl DynamicState {

View File

@ -0,0 +1,155 @@
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use command_buffer::DynamicState;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use VulkanObject;
use vk;
/// Keep track of the state of a command buffer builder, so that you don't need to bind objects
/// that were already bound.
///
/// > **Important**: Executing a secondary command buffer invalidates the state of a command buffer
/// > builder. When you do so, you need to call `invalidate()`.
pub struct StateCacher {
    // The dynamic state to synchronize with `CmdSetState`.
    // NOTE(review): currently only stored/reset, never consulted by the visible methods.
    dynamic_state: DynamicState,
    // The compute pipeline currently bound. 0 if nothing bound.
    compute_pipeline: vk::Pipeline,
    // The graphics pipeline currently bound. 0 if nothing bound.
    graphics_pipeline: vk::Pipeline,
}
/// Outcome of a `StateCacher` bind query.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum StateCacherOutcome {
    /// The caller needs to perform the state change in the actual command buffer builder.
    NeedChange,
    /// The state change is not necessary.
    AlreadyOk,
}
impl StateCacher {
/// Builds a new `StateCacher`.
#[inline]
pub fn new() -> StateCacher {
StateCacher {
dynamic_state: DynamicState::none(),
compute_pipeline: 0,
graphics_pipeline: 0,
}
}
/// Resets the cache to its default state. You **must** call this after executing a secondary
/// command buffer.
#[inline]
pub fn invalidate(&mut self) {
self.dynamic_state = DynamicState::none();
self.compute_pipeline = 0;
self.graphics_pipeline = 0;
}
/// Checks whether we need to bind a graphics pipeline. Returns `StateCacherOutcome::AlreadyOk`
/// if the pipeline was already bound earlier, and `StateCacherOutcome::NeedChange` if you need
/// to actually bind the pipeline.
pub fn bind_graphics_pipeline<P>(&mut self, pipeline: &P) -> StateCacherOutcome
where P: GraphicsPipelineAbstract
{
let inner = GraphicsPipelineAbstract::inner(pipeline).internal_object();
if inner == self.graphics_pipeline {
StateCacherOutcome::AlreadyOk
} else {
self.graphics_pipeline = inner;
StateCacherOutcome::NeedChange
}
}
/// Checks whether we need to bind a compute pipeline. Returns `StateCacherOutcome::AlreadyOk`
/// if the pipeline was already bound earlier, and `StateCacherOutcome::NeedChange` if you need
/// to actually bind the pipeline.
pub fn bind_compute_pipeline<P>(&mut self, pipeline: &P) -> StateCacherOutcome
where P: ComputePipelineAbstract
{
let inner = pipeline.inner().internal_object();
if inner == self.compute_pipeline {
StateCacherOutcome::AlreadyOk
} else {
self.compute_pipeline = inner;
StateCacherOutcome::NeedChange
}
}
}
/*
unsafe impl<I, O> AddCommand<commands_raw::CmdSetState> for StateCacher<I>
where I: AddCommand<commands_raw::CmdSetState, Out = O>
{
type Out = StateCacher<O>;
#[inline]
fn add(mut self, command: commands_raw::CmdSetState) -> Result<Self::Out, CommandAddError> {
// We need to synchronize `self.dynamic_state` with the state in `command`.
// While doing so, we tweak `command` to erase the states that are the same as what's
// already in `self.dynamic_state`.
let mut command_state = command.state().clone();
// Handle line width.
if let Some(new_val) = command_state.line_width {
if self.dynamic_state.line_width == Some(new_val) {
command_state.line_width = None;
} else {
self.dynamic_state.line_width = Some(new_val);
}
}
// TODO: missing implementations
Ok(StateCacher {
inner: self.inner.add(commands_raw::CmdSetState::new(command.device().clone(), command_state))?,
dynamic_state: self.dynamic_state,
graphics_pipeline: self.graphics_pipeline,
compute_pipeline: self.compute_pipeline,
vertex_buffers: self.vertex_buffers,
})
}
}
unsafe impl<I, O, B> AddCommand<commands_raw::CmdBindVertexBuffers<B>> for StateCacher<I>
where I: AddCommand<commands_raw::CmdBindVertexBuffers<B>, Out = O>
{
type Out = StateCacher<O>;
#[inline]
fn add(mut self, mut command: commands_raw::CmdBindVertexBuffers<B>)
-> Result<Self::Out, CommandAddError>
{
match &mut self.vertex_buffers {
&mut Some(ref mut curr) => {
if *curr != *command.hash() {
let new_hash = command.hash().clone();
command.diff(curr);
*curr = new_hash;
}
},
curr @ &mut None => {
*curr = Some(command.hash().clone());
}
};
Ok(StateCacher {
inner: self.inner.add(command)?,
dynamic_state: self.dynamic_state,
graphics_pipeline: self.graphics_pipeline,
compute_pipeline: self.compute_pipeline,
vertex_buffers: self.vertex_buffers,
})
}
}*/

View File

@ -13,8 +13,7 @@ use std::marker::PhantomData;
use std::ptr;
use smallvec::SmallVec;
use command_buffer::cb::UnsafeCommandBuffer;
use command_buffer::pool::CommandPool;
use command_buffer::sys::UnsafeCommandBuffer;
use device::Queue;
use sync::Fence;
use sync::PipelineStages;
@ -167,9 +166,7 @@ impl<'a> SubmitCommandBufferBuilder<'a> {
/// TODO: more here
///
#[inline]
pub unsafe fn add_command_buffer<P>(&mut self, command_buffer: &'a UnsafeCommandBuffer<P>)
where P: CommandPool
{
pub unsafe fn add_command_buffer<P>(&mut self, command_buffer: &'a UnsafeCommandBuffer<P>) {
self.command_buffers.push(command_buffer.internal_object());
}

View File

@ -0,0 +1,853 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::collections::hash_map::Entry;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::DerefMut;
use std::sync::Arc;
use fnv::FnvHashMap;
use smallvec::SmallVec;
use buffer::BufferAccess;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferExecError;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolAlloc;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::sys::Flags;
use command_buffer::sys::Kind;
use command_buffer::sys::UnsafeCommandBuffer;
use command_buffer::sys::UnsafeCommandBufferBuilder;
use command_buffer::sys::UnsafeCommandBufferBuilderBindVertexBuffer;
use command_buffer::sys::UnsafeCommandBufferBuilderBufferImageCopy;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor::ShaderStages;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use format::ClearValue;
use framebuffer::Framebuffer;
use framebuffer::FramebufferAbstract;
use framebuffer::RenderPass;
use framebuffer::RenderPassAbstract;
use image::ImageLayout;
use image::ImageAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use pipeline::input_assembly::IndexType;
use pipeline::vertex::VertexSource;
use pipeline::viewport::Scissor;
use pipeline::viewport::Viewport;
use sync::AccessCheckError;
use sync::AccessError;
use sync::AccessFlagBits;
use sync::Event;
use sync::PipelineStages;
use sync::GpuFuture;
use OomError;
// Layer above `UnsafeCommandBufferBuilder` that records which resources the commands use, so
// that the built command buffer can perform synchronization checks at submit time.
pub struct SyncCommandBufferBuilder<P> {
    // The raw builder that actually records the commands.
    inner: UnsafeCommandBufferBuilder<P>,
    // Synchronization state of every resource used so far. Keys compare equal when the
    // resources they designate conflict with each other.
    resources: FnvHashMap<Key<'static>, ResourceEntry>,
    // Contains the objects other than the ones in `resources` that must be kept alive while the
    // command buffer is being executed (eg. pipeline objects, ...).
    keep_alive: Vec<Box<KeepAlive + Send + Sync>>,
}
// Error returned while recording. Currently has no variants; they will be added as the
// synchronization checks are implemented.
pub enum SyncCommandBufferBuilderError {
}
// Trait implemented on everything, so that we can turn any `T` into a `Box<KeepAlive>`.
// The box's only job is to own the value until the command buffer is destroyed.
trait KeepAlive {}
impl<T> KeepAlive for T {}
// Key that identifies a resource. Implements `PartialEq`, `Eq` and `Hash` so that two resources
// that conflict with each other compare equal.
enum Key<'a> {
    // A buffer.
    Buffer(Box<BufferAccess + Send + Sync>),
    // Reference to a buffer. This variant of the key must never be stored in a hashmap. Instead
    // it must be used only when creating a temporary key to lookup an entry in said hashmap.
    BufferRef(&'a BufferAccess),
    // An image.
    Image(Box<ImageAccess + Send + Sync>),
    // Reference to an image. This variant of the key must never be stored in a hashmap. Instead
    // it must be used only when creating a temporary key to lookup an entry in said hashmap.
    ImageRef(&'a ImageAccess),
    // One attachment of a framebuffer, identified by the framebuffer and the attachment index.
    FramebufferAttachment(Box<FramebufferAbstract + Send + Sync>, u32),
}
// SAFETY: `BufferRef` and `ImageRef` don't implement `Send`/`Sync`, but all other variants do.
// Since these two exceptions must never be stored in a hashmap, we implement `Send`/`Sync`
// manually so that the hashmap implements `Send` and `Sync` as well.
unsafe impl<'a> Send for Key<'a> {}
unsafe impl<'a> Sync for Key<'a> {}
impl<'a> Key<'a> {
    /// Returns true if the resource designated by this key may overlap the buffer `other`.
    #[inline]
    fn conflicts_buffer_all(&self, other: &BufferAccess) -> bool {
        match *self {
            Key::Buffer(ref a) => a.conflicts_buffer_all(other),
            Key::BufferRef(a) => a.conflicts_buffer_all(other),
            Key::Image(ref a) => a.conflicts_buffer_all(other),
            Key::ImageRef(a) => a.conflicts_buffer_all(other),
            Key::FramebufferAttachment(ref fb, idx) => {
                // Delegate to the image backing the framebuffer attachment.
                fb.attachments()[idx as usize].parent().conflicts_buffer_all(other)
            },
        }
    }

    /// Returns true if the resource designated by this key may overlap the image `other`.
    #[inline]
    fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
        match *self {
            Key::Buffer(ref a) => a.conflicts_image_all(other),
            Key::BufferRef(a) => a.conflicts_image_all(other),
            Key::Image(ref a) => a.conflicts_image_all(other),
            Key::ImageRef(a) => a.conflicts_image_all(other),
            Key::FramebufferAttachment(ref fb, idx) => {
                // Delegate to the image backing the framebuffer attachment.
                fb.attachments()[idx as usize].parent().conflicts_image_all(other)
            },
        }
    }
}
impl<'a> PartialEq for Key<'a> {
    #[inline]
    fn eq(&self, other: &Key) -> bool {
        // Two keys are "equal" when the resources they designate conflict with each other.
        // NOTE(review): this relies on the `conflicts_*` relations being symmetric (and
        // coherent with `Hash` below) — confirm against the `BufferAccess`/`ImageAccess`
        // implementations.
        match other {
            &Key::Buffer(ref b) => self.conflicts_buffer_all(b),
            &Key::BufferRef(ref b) => self.conflicts_buffer_all(b),
            &Key::Image(ref b) => self.conflicts_image_all(b),
            &Key::ImageRef(ref b) => self.conflicts_image_all(b),
            &Key::FramebufferAttachment(ref b, idx) => {
                self.conflicts_image_all(b.attachments()[idx as usize].parent())
            },
        }
    }
}
// Marker impl: assumes every resource conflicts with itself, making `eq` reflexive.
impl<'a> Eq for Key<'a> {
}
impl<'a> Hash for Key<'a> {
    /// Hashes the resource's conflict key, so that keys that compare equal (ie. conflicting
    /// resources) land in the same hash bucket.
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        let conflict_key = match *self {
            Key::Buffer(ref buf) => buf.conflict_key_all(),
            Key::BufferRef(buf) => buf.conflict_key_all(),
            Key::Image(ref img) => img.conflict_key_all(),
            Key::ImageRef(img) => img.conflict_key_all(),
            Key::FramebufferAttachment(ref fb, idx) => {
                fb.attachments()[idx as usize].parent().conflict_key_all()
            },
        };
        conflict_key.hash(state)
    }
}
// Synchronization state of a resource.
struct ResourceEntry {
    // Union of all pipeline stages that touch the resource.
    final_stages: PipelineStages,
    // Union of all access types performed on the resource.
    final_access: AccessFlagBits,
    // True if any of the recorded accesses writes the resource.
    exclusive: bool,
    // Layout the image must be in when execution starts. `Undefined` for buffers.
    initial_layout: ImageLayout,
    // Layout the image is left in when execution ends. `Undefined` for buffers.
    final_layout: ImageLayout,
}
// Helper that accumulates the resources used by a single command before it is recorded.
// TODO: should check conflicts within each command
struct Binder<'r> {
    // The builder's resource map being updated.
    resources: &'r mut FnvHashMap<Key<'static>, ResourceEntry>,
    // One element per `add_*` call, in call order: `Some` if the resource wasn't in `resources`
    // yet and must be inserted by the matching `finish_*` call, `None` if an existing entry was
    // updated in place.
    insertions: SmallVec<[Option<ResourceEntry>; 16]>,
}
// Starts a new binding pass over `resources` for one command.
fn start<'r>(resources: &'r mut FnvHashMap<Key<'static>, ResourceEntry>) -> Binder<'r> {
    Binder {
        resources,
        insertions: SmallVec::new(),
    }
}
impl<'r> Binder<'r> {
    // Registers a buffer access for the current command. Each `add_buffer` call must be matched
    // by a later `finish_buf` call, in the same order.
    fn add_buffer<B>(&mut self, buffer: &B, exclusive: bool, stages: PipelineStages,
                     access: AccessFlagBits)
        where B: BufferAccess
    {
        // TODO: yay, can't even call `get_mut` on the hash map without a Key that is 'static
        // itself ; Rust needs HKTs for that
        let key: Key = Key::BufferRef(buffer);
        // NOTE(review): the lifetime is transmuted to 'static only for the duration of the
        // lookup; the `BufferRef` key itself is never stored in the map.
        let key: Key<'static> = unsafe { mem::transmute(key) };
        match self.resources.get_mut(&key) {
            Some(entry) => {
                // Resource already known: merge this access into the existing entry.
                // TODO: remove some stages and accesses when there's an "overflow"?
                entry.final_stages = entry.final_stages | stages;
                entry.final_access = entry.final_access | access;
                entry.exclusive = entry.exclusive || exclusive;
                entry.final_layout = ImageLayout::Undefined;
                self.insertions.push(None);
            },
            None => {
                // New resource: defer the insertion to `finish_buf`, which owns the buffer.
                self.insertions.push(Some(ResourceEntry {
                    final_stages: stages,
                    final_access: access,
                    exclusive: exclusive,
                    initial_layout: ImageLayout::Undefined,
                    final_layout: ImageLayout::Undefined,
                }));
            },
        }
    }

    // Registers an image access for the current command. Each `add_image` call must be matched
    // by a later `finish_img` call, in the same order.
    fn add_image<I>(&mut self, image: &I, exclusive: bool, stages: PipelineStages,
                    access: AccessFlagBits, initial_layout: ImageLayout, final_layout: ImageLayout)
        where I: ImageAccess
    {
        // TODO: yay, can't even call `get_mut` on the hash map without a Key that is 'static
        // itself ; Rust needs HKTs for that
        let key: Key = Key::ImageRef(image);
        // NOTE(review): same transient-lifetime transmute as in `add_buffer`.
        let key: Key<'static> = unsafe { mem::transmute(key) };
        match self.resources.get_mut(&key) {
            Some(entry) => {
                // TODO: exclusive accss if transition required?
                entry.exclusive = entry.exclusive || exclusive;
                // TODO: remove some stages and accesses when there's an "overflow"?
                entry.final_stages = entry.final_stages | stages;
                entry.final_access = entry.final_access | access;
                entry.final_layout = final_layout;
                self.insertions.push(None);
            },
            None => {
                self.insertions.push(Some(ResourceEntry {
                    final_stages: stages,
                    final_access: access,
                    exclusive: exclusive,
                    initial_layout: initial_layout,
                    final_layout: final_layout,
                }));
            }
        }
    }

    // Emits the pipeline barrier required by the accesses registered so far.
    fn flush_pipeline_barrier<P>(&mut self, inner: &mut UnsafeCommandBufferBuilder<P>) {
        // TODO:
    }

    // Consumes the front of `insertions` (FIFO, matching the order of the `add_*` calls) and,
    // if the resource was new, stores it in the map with ownership of the buffer.
    // NOTE(review): `remove(0)` is O(n) on a SmallVec, but the list holds at most a handful of
    // entries per command.
    fn finish_buf<B>(&mut self, buffer: B)
        where B: BufferAccess + Send + Sync + 'static
    {
        match self.insertions.remove(0) {
            None => (),
            Some(entry) => {
                let prev_value = self.resources.insert(Key::Buffer(Box::new(buffer) as Box<_>), entry);
                debug_assert!(prev_value.is_none());
            },
        }
    }

    // Same as `finish_buf`, but for an image registered with `add_image`.
    fn finish_img<I>(&mut self, image: I)
        where I: ImageAccess + Send + Sync + 'static
    {
        match self.insertions.remove(0) {
            None => (),
            Some(entry) => {
                let prev_value = self.resources.insert(Key::Image(Box::new(image) as Box<_>), entry);
                debug_assert!(prev_value.is_none());
            },
        }
    }
}
impl<P> SyncCommandBufferBuilder<P> {
pub unsafe fn new<Pool, R, F, A>(pool: &Pool, kind: Kind<R, F>, flags: Flags)
-> Result<SyncCommandBufferBuilder<P>, OomError>
where Pool: CommandPool<Builder = P, Alloc = A>,
P: CommandPoolBuilderAlloc<Alloc = A>,
A: CommandPoolAlloc,
R: RenderPassAbstract,
F: FramebufferAbstract
{
let cmd = UnsafeCommandBufferBuilder::new(pool, kind, flags)?;
Ok(SyncCommandBufferBuilder::from_unsafe_cmd(cmd))
}
#[inline]
fn from_unsafe_cmd(cmd: UnsafeCommandBufferBuilder<P>) -> SyncCommandBufferBuilder<P> {
SyncCommandBufferBuilder {
inner: cmd,
resources: FnvHashMap::default(),
keep_alive: Vec::new(),
}
}
/// Builds the command buffer.
#[inline]
pub fn build(mut self) -> Result<SyncCommandBuffer<P::Alloc>, OomError>
where P: CommandPoolBuilderAlloc
{
// TODO: only do this if we don't have the one time submit flag
self.resources.shrink_to_fit();
self.keep_alive.shrink_to_fit();
Ok(SyncCommandBuffer {
inner: self.inner.build()?,
resources: self.resources,
keep_alive: self.keep_alive,
})
}
// Adds a framebuffer to the list.
fn add_framebuffer<F>(&mut self, framebuffer: F)
where F: FramebufferAbstract + Send + Sync + 'static
{
/*// TODO: slow
for index in 0 .. FramebufferAbstract::attachments(framebuffer).len() {
let key = Key::FramebufferAttachment(Box::new(framebuffer.clone()), index as u32);
let desc = framebuffer.attachment_desc(index).expect("Wrong implementation of FramebufferAbstract trait");
let image = FramebufferAbstract::attachments(framebuffer)[index];
let initial_layout = {
match desc.initial_layout {
ImageLayout::Undefined | ImageLayout::Preinitialized => desc.initial_layout,
_ => image.parent().initial_layout_requirement(),
}
};
let final_layout = {
match desc.final_layout {
ImageLayout::Undefined | ImageLayout::Preinitialized => desc.final_layout,
_ => image.parent().final_layout_requirement(),
}
};
match self.resources.entry(key) {
Entry::Vacant(entry) => {
entry.insert(ResourceEntry {
final_stages: PipelineStages { all_commands: true, ..PipelineStages::none() }, // FIXME:
final_access: AccessFlagBits::all(), // FIXME:
exclusive: true, // FIXME:
initial_layout: initial_layout,
final_layout: final_layout,
});
},
Entry::Occupied(mut entry) => {
let entry = entry.get_mut();
// TODO: update stages and access
entry.exclusive = true; // FIXME:
entry.final_layout = final_layout;
},
}
}*/
}
/// Calls `vkBeginRenderPass` on the builder.
#[inline]
pub unsafe fn begin_render_pass<F, I>(&mut self, framebuffer: F, secondary: bool,
clear_values: I)
where F: FramebufferAbstract + Send + Sync + 'static,
I: Iterator<Item = ClearValue>
{
self.inner.begin_render_pass(&framebuffer, secondary, clear_values);
self.add_framebuffer(framebuffer);
}
/// Calls `vkCmdBindIndexBuffer` on the builder.
#[inline]
pub unsafe fn bind_index_buffer<B>(&mut self, buffer: B, index_ty: IndexType)
where B: BufferAccess + Send + Sync + 'static
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&buffer, false,
PipelineStages { vertex_input: true, .. PipelineStages::none() },
AccessFlagBits { index_read: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.bind_index_buffer(&buffer, index_ty);
binder.finish_buf(buffer);
}
/// Calls `vkCmdBindPipeline` on the builder with a graphics pipeline.
#[inline]
pub unsafe fn bind_pipeline_graphics<Gp>(&mut self, pipeline: Gp)
where Gp: GraphicsPipelineAbstract + Send + Sync + 'static
{
self.inner.bind_pipeline_graphics(&pipeline);
self.keep_alive.push(Box::new(pipeline) as Box<_>);
}
/// Calls `vkCmdBindPipeline` on the builder with a compute pipeline.
#[inline]
pub unsafe fn bind_pipeline_compute<Cp>(&mut self, pipeline: Cp)
where Cp: ComputePipelineAbstract + Send + Sync + 'static
{
self.inner.bind_pipeline_compute(&pipeline);
self.keep_alive.push(Box::new(pipeline) as Box<_>);
}
#[inline]
pub fn bind_descriptor_sets(&mut self) -> SyncCommandBufferBuilderBindDescriptorSets<P> {
SyncCommandBufferBuilderBindDescriptorSets {
builder: self,
inner: SmallVec::new(),
}
}
#[inline]
pub fn bind_vertex_buffers(&mut self) -> SyncCommandBufferBuilderBindVertexBuffer<P> {
SyncCommandBufferBuilderBindVertexBuffer {
builder: self,
inner: UnsafeCommandBufferBuilderBindVertexBuffer::new(),
}
}
/// Calls `vkCmdCopyBuffer` on the builder.
///
/// Does nothing if the list of regions is empty, as it would be a no-op and isn't a valid
/// usage of the command anyway.
#[inline]
pub unsafe fn copy_buffer<S, D, R>(&mut self, source: S, destination: D, regions: R)
where S: BufferAccess + Send + Sync + 'static,
D: BufferAccess + Send + Sync + 'static,
R: Iterator<Item = (usize, usize, usize)>
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&source, false,
PipelineStages { transfer: true, .. PipelineStages::none() },
AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
binder.add_buffer(&destination, true,
PipelineStages { transfer: true, .. PipelineStages::none() },
AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.copy_buffer(&source, &destination, regions);
binder.finish_buf(source);
binder.finish_buf(destination);
}
/// Calls `vkCmdCopyBufferToImage` on the builder.
///
/// Does nothing if the list of regions is empty, as it would be a no-op and isn't a valid
/// usage of the command anyway.
#[inline]
pub unsafe fn copy_buffer_to_image<S, D, R>(&mut self, source: S, destination: D,
dest_layout: ImageLayout, regions: R)
where S: BufferAccess + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static,
R: Iterator<Item = UnsafeCommandBufferBuilderBufferImageCopy>
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&source, false,
PipelineStages { transfer: true, .. PipelineStages::none() },
AccessFlagBits { transfer_read: true, .. AccessFlagBits::none() });
binder.add_image(&destination, true,
PipelineStages { transfer: true, .. PipelineStages::none() },
AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() },
dest_layout, dest_layout);
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.copy_buffer_to_image(&source, &destination, dest_layout, regions);
binder.finish_buf(source);
binder.finish_img(destination);
}
/// Calls `vkCmdDispatch` on the builder.
#[inline]
pub unsafe fn dispatch(&mut self, dimensions: [u32; 3]) {
self.inner.dispatch(dimensions);
}
/// Calls `vkCmdDispatchIndirect` on the builder.
#[inline]
pub unsafe fn dispatch_indirect<B>(&mut self, buffer: B)
where B: BufferAccess + Send + Sync + 'static
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&buffer, false,
PipelineStages { draw_indirect: true, .. PipelineStages::none() }, // TODO: is draw_indirect correct?
AccessFlagBits { indirect_command_read: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.dispatch_indirect(&buffer);
binder.finish_buf(buffer);
}
/// Calls `vkCmdDraw` on the builder.
#[inline]
pub unsafe fn draw(&mut self, vertex_count: u32, instance_count: u32, first_vertex: u32,
first_instance: u32)
{
self.inner.draw(vertex_count, instance_count, first_vertex, first_instance);
}
/// Calls `vkCmdDrawIndexed` on the builder.
#[inline]
pub unsafe fn draw_indexed(&mut self, index_count: u32, instance_count: u32, first_index: u32,
vertex_offset: i32, first_instance: u32)
{
self.inner.draw_indexed(index_count, instance_count, first_index, vertex_offset,
first_instance);
}
/// Calls `vkCmdDrawIndirect` on the builder.
#[inline]
pub unsafe fn draw_indirect<B>(&mut self, buffer: B, draw_count: u32, stride: u32)
where B: BufferAccess + Send + Sync + 'static
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&buffer, false,
PipelineStages { draw_indirect: true, .. PipelineStages::none() },
AccessFlagBits { indirect_command_read: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.draw_indirect(&buffer, draw_count, stride);
binder.finish_buf(buffer);
}
/// Calls `vkCmdDrawIndexedIndirect` on the builder.
#[inline]
pub unsafe fn draw_indexed_indirect<B>(&mut self, buffer: B, draw_count: u32, stride: u32)
where B: BufferAccess + Send + Sync + 'static
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&buffer, false,
PipelineStages { draw_indirect: true, .. PipelineStages::none() },
AccessFlagBits { indirect_command_read: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.draw_indexed_indirect(&buffer, draw_count, stride);
binder.finish_buf(buffer);
}
/// Calls `vkCmdEndRenderPass` on the builder.
#[inline]
pub unsafe fn end_render_pass(&mut self) {
self.inner.end_render_pass();
}
/// Calls `vkCmdFillBuffer` on the builder.
#[inline]
pub unsafe fn fill_buffer<B>(&mut self, buffer: B, data: u32)
where B: BufferAccess + Send + Sync + 'static
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&buffer, true,
PipelineStages { transfer: true, .. PipelineStages::none() },
AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.fill_buffer(&buffer, data);
binder.finish_buf(buffer);
}
/// Calls `vkCmdNextSubpass` on the builder.
#[inline]
pub unsafe fn next_subpass(&mut self, secondary: bool) {
self.inner.next_subpass(secondary);
}
/// Calls `vkCmdPushConstants` on the builder.
#[inline]
pub unsafe fn push_constants<Pl, D>(&mut self, pipeline_layout: Pl, stages: ShaderStages,
offset: u32, size: u32, data: &D)
where Pl: PipelineLayoutAbstract + Send + Sync + 'static,
D: ?Sized
{
self.inner.push_constants(&pipeline_layout, stages, offset, size, data);
self.keep_alive.push(Box::new(pipeline_layout) as Box<_>);
}
/// Calls `vkCmdResetEvent` on the builder.
#[inline]
pub unsafe fn reset_event(&mut self, event: Arc<Event>, stages: PipelineStages) {
self.inner.reset_event(&event, stages);
self.keep_alive.push(Box::new(event) as Box<_>);
}
/// Calls `vkCmdSetBlendConstants` on the builder.
#[inline]
pub unsafe fn set_blend_constants(&mut self, constants: [f32; 4]) {
self.inner.set_blend_constants(constants);
}
/// Calls `vkCmdSetDepthBias` on the builder.
#[inline]
pub unsafe fn set_depth_bias(&mut self, constant_factor: f32, clamp: f32, slope_factor: f32) {
self.inner.set_depth_bias(constant_factor, clamp, slope_factor);
}
/// Calls `vkCmdSetDepthBounds` on the builder.
#[inline]
pub unsafe fn set_depth_bounds(&mut self, min: f32, max: f32) {
self.inner.set_depth_bounds(min, max);
}
/// Calls `vkCmdSetEvent` on the builder.
#[inline]
pub unsafe fn set_event(&mut self, event: Arc<Event>, stages: PipelineStages) {
self.inner.set_event(&event, stages);
self.keep_alive.push(Box::new(event) as Box<_>);
}
/// Calls `vkCmdSetLineWidth` on the builder.
#[inline]
pub unsafe fn set_line_width(&mut self, line_width: f32) {
self.inner.set_line_width(line_width);
}
// TODO: stencil states
/// Calls `vkCmdSetScissor` on the builder.
///
/// If the list is empty then the command is automatically ignored.
#[inline]
pub unsafe fn set_scissor<I>(&mut self, first_scissor: u32, scissors: I)
where I: Iterator<Item = Scissor>
{
self.inner.set_scissor(first_scissor, scissors);
}
/// Calls `vkCmdSetViewport` on the builder.
///
/// If the list is empty then the command is automatically ignored.
#[inline]
pub unsafe fn set_viewport<I>(&mut self, first_viewport: u32, viewports: I)
where I: Iterator<Item = Viewport>
{
self.inner.set_viewport(first_viewport, viewports);
}
/// Calls `vkCmdUpdateBuffer` on the builder.
#[inline]
pub unsafe fn update_buffer<B, D>(&mut self, buffer: B, data: &D)
where B: BufferAccess + Send + Sync + 'static,
D: ?Sized
{
let mut binder = start(&mut self.resources);
binder.add_buffer(&buffer, true,
PipelineStages { transfer: true, .. PipelineStages::none() },
AccessFlagBits { transfer_write: true, .. AccessFlagBits::none() });
binder.flush_pipeline_barrier(&mut self.inner);
self.inner.update_buffer(&buffer, data);
binder.finish_buf(buffer);
}
}
unsafe impl<P> DeviceOwned for SyncCommandBufferBuilder<P> {
    /// Returns the device of the wrapped unsafe builder.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}
/// Prototype for a `vkCmdBindDescriptorSets`: collects descriptor sets, then records the
/// command and registers the sets to be kept alive when `submit` is called.
pub struct SyncCommandBufferBuilderBindDescriptorSets<'b, P: 'b> {
    builder: &'b mut SyncCommandBufferBuilder<P>,
    // Sets collected so far, in binding order.
    inner: SmallVec<[Box<DescriptorSet + Send + Sync>; 12]>,
}
impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
    /// Adds a descriptor set to the list.
    #[inline]
    pub fn add<S>(&mut self, set: S)
        where S: DescriptorSet + Send + Sync + 'static
    {
        self.inner.push(Box::new(set));
    }

    /// Records the `vkCmdBindDescriptorSets` command with all the sets added so far.
    // NOTE(review): unlike `push_constants`, the `pipeline_layout` is not pushed onto
    // `keep_alive` here — confirm whether its lifetime is guaranteed elsewhere.
    #[inline]
    pub unsafe fn submit<Pl, I>(self, graphics: bool, pipeline_layout: Pl, first_binding: u32,
                                dynamic_offsets: I)
        where Pl: PipelineLayoutAbstract,
              I: Iterator<Item = u32>,
    {
        self.builder.inner.bind_descriptor_sets(graphics, &pipeline_layout, first_binding,
                                                self.inner.iter().map(|s| s.inner()),
                                                dynamic_offsets);
        // Keep the sets alive for as long as the command buffer may execute.
        for set in self.inner {
            self.builder.keep_alive.push(Box::new(set));
        }
    }
}
/// Prototype for a `vkCmdBindVertexBuffers`.
pub struct SyncCommandBufferBuilderBindVertexBuffer<'a, P: 'a> {
    builder: &'a mut SyncCommandBufferBuilder<P>,
    // Raw list of buffer handles being accumulated.
    inner: UnsafeCommandBufferBuilderBindVertexBuffer,
}
impl<'a, P> SyncCommandBufferBuilderBindVertexBuffer<'a, P> {
    /// Adds a buffer to the list.
    // NOTE(review): the buffer is currently neither registered in `resources` nor kept
    // alive — vertex buffers are not synchronized until the FIXME below is resolved.
    #[inline]
    pub fn add<B>(&mut self, buffer: B)
        where B: BufferAccess + Send + Sync + 'static
    {
        self.inner.add(&buffer);
        // FIXME:
        /*self.builder.add_buffer(buffer, false,
                                PipelineStages { vertex_input: true, .. PipelineStages::none() },
                                AccessFlagBits { vertex_attribute_read: true, .. AccessFlagBits::none() });*/
    }

    /// Records the `vkCmdBindVertexBuffers` command with all the buffers added so far.
    #[inline]
    pub unsafe fn submit(self, first_binding: u32) {
        self.builder.inner.bind_vertex_buffers(first_binding, self.inner);
    }
}
/// Command buffer with synchronization information attached.
///
/// Each resource used by the recorded commands is tracked in `resources` so
/// that the appropriate GPU locks can be acquired at submit time.
pub struct SyncCommandBuffer<P> {
    // The underlying raw command buffer.
    inner: UnsafeCommandBuffer<P>,
    // One entry per tracked resource, holding the access/layout information
    // consulted by `prepare_submit` and the `check_*_access` methods.
    resources: FnvHashMap<Key<'static>, ResourceEntry>,
    // Contains the objects other than the ones in `resources` that must be kept alive while the
    // command buffer is being executed (eg. pipeline objects, ...).
    keep_alive: Vec<Box<KeepAlive + Send + Sync>>,
}
unsafe impl<P> CommandBuffer for SyncCommandBuffer<P> {
    type PoolAlloc = P;

    #[inline]
    fn inner(&self) -> &UnsafeCommandBuffer<Self::PoolAlloc> {
        &self.inner
    }

    /// Acquires the GPU lock of every resource used by the command buffer.
    ///
    /// For each tracked resource we first ask `future` whether it already
    /// grants the required access; if so, only the lock counter is increased.
    /// Otherwise we try to take the lock ourselves, and fail the submission
    /// if both attempts fail.
    fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
        // TODO: if at any point we return an error, we can't recover
        // (locks acquired in earlier iterations are never released).
        for (key, entry) in self.resources.iter() {
            match key {
                &Key::Buffer(ref buf) => {
                    // Does the future already provide the access we need?
                    let prev_err = match future.check_buffer_access(&buf, entry.exclusive, queue) {
                        Ok(_) => {
                            unsafe { buf.increase_gpu_lock(); }
                            continue;
                        },
                        Err(err) => err
                    };
                    // Fall back to locking the buffer directly; on failure,
                    // report the most informative of the two errors.
                    match (buf.try_gpu_lock(entry.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
                        (_, AccessCheckError::Denied(err)) => return Err(err.into()),
                    }
                },
                &Key::Image(ref img) => {
                    // Same strategy as for buffers, with the expected initial
                    // layout checked as well.
                    let prev_err = match future.check_image_access(img, entry.initial_layout,
                                                                   entry.exclusive, queue)
                    {
                        Ok(_) => {
                            unsafe { img.increase_gpu_lock(); }
                            continue;
                        },
                        Err(err) => err
                    };
                    match (img.try_gpu_lock(entry.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
                        (_, AccessCheckError::Denied(err)) => return Err(err.into()),
                    }
                },
                &Key::FramebufferAttachment(ref fb, idx) => {
                    // The lock is taken on the image backing the attachment.
                    let img = fb.attachments()[idx as usize].parent();
                    let prev_err = match future.check_image_access(img, entry.initial_layout,
                                                                   entry.exclusive, queue)
                    {
                        Ok(_) => {
                            unsafe { img.increase_gpu_lock(); }
                            continue;
                        },
                        Err(err) => err
                    };
                    // FIXME: this is bad because dropping the submit sync layer doesn't drop the
                    // attachments of the framebuffer, meaning that they will stay locked
                    match (img.try_gpu_lock(entry.exclusive, queue), prev_err) {
                        (Ok(_), _) => (),
                        (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
                        (_, AccessCheckError::Denied(err)) => return Err(err.into()),
                    }
                },
                // Ref keys are only used for lookups in `check_*_access` and
                // are never stored in `resources`.
                &Key::BufferRef(_) => unreachable!(),
                &Key::ImageRef(_) => unreachable!(),
            }
        }

        // FIXME: pipeline barriers if necessary?

        Ok(())
    }

    #[inline]
    fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
        -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        // TODO: check the queue family
        if let Some(value) = self.resources.get(&Key::BufferRef(buffer)) {
            // Exclusive access can only be granted if the command buffer
            // itself accessed the buffer exclusively.
            if !value.exclusive && exclusive {
                return Err(AccessCheckError::Denied(AccessError::ExclusiveDenied));
            }
            return Ok(Some((value.final_stages, value.final_access)));
        }

        Err(AccessCheckError::Unknown)
    }

    #[inline]
    fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
        -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
    {
        // TODO: check the queue family
        if let Some(value) = self.resources.get(&Key::ImageRef(image)) {
            // The layout the image is left in must match the requested one,
            // unless the caller doesn't care (`Undefined`).
            if layout != ImageLayout::Undefined && value.final_layout != layout {
                return Err(AccessCheckError::Denied(AccessError::UnexpectedImageLayout {
                    allowed: value.final_layout,
                    requested: layout,
                }));
            }

            if !value.exclusive && exclusive {
                return Err(AccessCheckError::Denied(AccessError::ExclusiveDenied));
            }

            return Ok(Some((value.final_stages, value.final_access)));
        }

        Err(AccessCheckError::Unknown)
    }
}
// The command buffer belongs to the same device as its inner raw buffer.
unsafe impl<P> DeviceOwned for SyncCommandBuffer<P> {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -15,16 +15,14 @@ use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use buffer::BufferAccess;
use command_buffer::cb::UnsafeCommandBuffer;
use command_buffer::pool::CommandPool;
use command_buffer::submit::SubmitAnyBuilder;
use command_buffer::submit::SubmitCommandBufferBuilder;
use command_buffer::sys::UnsafeCommandBuffer;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageLayout;
use image::ImageAccess;
use instance::QueueFamily;
use sync::now;
use sync::AccessError;
use sync::AccessCheckError;
@ -38,16 +36,18 @@ use VulkanObject;
pub unsafe trait CommandBuffer: DeviceOwned {
/// The command pool of the command buffer.
type Pool: CommandPool;
type PoolAlloc;
/// Returns the underlying `UnsafeCommandBuffer` of this command buffer.
fn inner(&self) -> &UnsafeCommandBuffer<Self::Pool>;
fn inner(&self) -> &UnsafeCommandBuffer<Self::PoolAlloc>;
/// Returns the queue family of the command buffer.
/*/// Returns the queue family of the command buffer.
#[inline]
fn queue_family(&self) -> QueueFamily {
fn queue_family(&self) -> QueueFamily
where Self::PoolAlloc: CommandPoolAlloc
{
self.inner().queue_family()
}
}*/
/// Checks whether this command buffer is allowed to be submitted after the `future` and on
/// the given queue.
@ -154,10 +154,10 @@ pub unsafe trait CommandBufferBuild {
}
unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer {
type Pool = <T::Target as CommandBuffer>::Pool;
type PoolAlloc = <T::Target as CommandBuffer>::PoolAlloc;
#[inline]
fn inner(&self) -> &UnsafeCommandBuffer<Self::Pool> {
fn inner(&self) -> &UnsafeCommandBuffer<Self::PoolAlloc> {
(**self).inner()
}

View File

@ -0,0 +1,86 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::cmp;
use std::error;
use std::fmt;
use buffer::BufferAccess;
use device::Device;
use device::DeviceOwned;
use VulkanObject;
/// Checks whether a copy buffer command is valid.
///
/// # Panic
///
/// - Panics if the source and destination were not created with `device`.
///
// FIXME: type safety
pub fn check_copy_buffer<S, D>(device: &Device, source: &S, destination: &D)
    -> Result<(), CheckCopyBufferError>
    where S: ?Sized + BufferAccess,
          D: ?Sized + BufferAccess
{
    // Both buffers must have been created with the device the command is
    // recorded on.
    assert_eq!(source.inner().buffer.device().internal_object(),
               device.internal_object());
    assert_eq!(destination.inner().buffer.device().internal_object(),
               device.internal_object());

    // The transfer usages must have been enabled at buffer creation.
    if !source.inner().buffer.usage_transfer_src() {
        return Err(CheckCopyBufferError::SourceMissingTransferUsage);
    }
    if !destination.inner().buffer.usage_transfer_dest() {
        return Err(CheckCopyBufferError::DestinationMissingTransferUsage);
    }

    // The copy covers the smaller of the two buffers; source and destination
    // ranges must not overlap in memory.
    let copy_size = cmp::min(source.size(), destination.size());
    if source.conflicts_buffer(0, copy_size, &destination, 0, copy_size) {
        return Err(CheckCopyBufferError::OverlappingRanges);
    }
    debug_assert!(!destination.conflicts_buffer(0, copy_size, &source, 0, copy_size));

    Ok(())
}
/// Error that can happen from `check_copy_buffer`.
#[derive(Debug, Copy, Clone)]
pub enum CheckCopyBufferError {
    /// The source buffer is missing the transfer source usage.
    SourceMissingTransferUsage,
    /// The destination buffer is missing the transfer destination usage.
    DestinationMissingTransferUsage,
    /// The source and destination are overlapping.
    OverlappingRanges,
}

impl error::Error for CheckCopyBufferError {
    /// Returns a short static description of the error.
    #[inline]
    fn description(&self) -> &str {
        match *self {
            CheckCopyBufferError::SourceMissingTransferUsage => {
                "the source buffer is missing the transfer source usage"
            },
            CheckCopyBufferError::DestinationMissingTransferUsage => {
                "the destination buffer is missing the transfer destination usage"
            },
            CheckCopyBufferError::OverlappingRanges => {
                "the source and destination are overlapping"
            },
        }
    }
}

impl fmt::Display for CheckCopyBufferError {
    // Displays the same text as `description`.
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "{}", error::Error::description(self))
    }
}

View File

@ -0,0 +1,50 @@
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::fmt;
use buffer::BufferAccess;
use command_buffer::DynamicState;
use device::Device;
use device::DeviceOwned;
use pipeline::GraphicsPipelineAbstract;
use VulkanObject;
/// Checks whether states that are about to be set are correct.
///
/// NOTE(review): validation is not implemented yet — this currently always
/// returns `Ok(())` regardless of `state`.
pub fn check_dynamic_state_validity<Pl>(pipeline: &Pl, state: &DynamicState)
    -> Result<(), CheckDynamicStateValidityError>
    where Pl: GraphicsPipelineAbstract
{
    // Fetched in anticipation of the real checks; currently unused (see FIXME).
    let device = pipeline.device();

    // FIXME:

    Ok(())
}
/// Error that can happen from `check_dynamic_state_validity`.
///
/// Currently uninhabited: no checks are implemented yet, so this error can
/// never actually be produced.
#[derive(Debug, Copy, Clone)]
pub enum CheckDynamicStateValidityError {
}

impl error::Error for CheckDynamicStateValidityError {
    #[inline]
    fn description(&self) -> &str {
        // The enum has no variants, so this match is exhaustive and the
        // function body is unreachable.
        match *self {
        }
    }
}

impl fmt::Display for CheckDynamicStateValidityError {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "{}", error::Error::description(self))
    }
}

View File

@ -0,0 +1,68 @@
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::fmt;
use buffer::BufferAccess;
use device::Device;
use device::DeviceOwned;
use VulkanObject;
/// Checks whether a fill buffer command is valid.
///
/// # Panic
///
/// - Panics if the buffer was not created with `device`.
///
pub fn check_fill_buffer<B>(device: &Device, buffer: &B) -> Result<(), CheckFillBufferError>
    where B: ?Sized + BufferAccess
{
    let inner = buffer.inner();

    // The buffer must have been created with the device the command is
    // recorded on.
    assert_eq!(inner.buffer.device().internal_object(), device.internal_object());

    // Filling requires the transfer-destination usage.
    if !inner.buffer.usage_transfer_dest() {
        return Err(CheckFillBufferError::BufferMissingUsage);
    }

    // The destination offset must be a multiple of 4.
    if inner.offset % 4 == 0 {
        Ok(())
    } else {
        Err(CheckFillBufferError::WrongAlignment)
    }
}
/// Error that can happen when attempting to add a `fill_buffer` command.
#[derive(Debug, Copy, Clone)]
pub enum CheckFillBufferError {
    /// The "transfer destination" usage must be enabled on the buffer.
    BufferMissingUsage,
    /// The data or size must be 4-bytes aligned.
    WrongAlignment,
}

impl error::Error for CheckFillBufferError {
    /// Returns a short static description of the error.
    #[inline]
    fn description(&self) -> &str {
        match *self {
            CheckFillBufferError::BufferMissingUsage => {
                "the transfer destination usage must be enabled on the buffer"
            },
            CheckFillBufferError::WrongAlignment => {
                "the offset or size are not aligned to 4 bytes"
            },
        }
    }
}

impl fmt::Display for CheckFillBufferError {
    // Displays the same text as `description`.
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "{}", error::Error::description(self))
    }
}

View File

@ -0,0 +1,20 @@
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Functions that check the validity of commands.
pub use self::copy_buffer::{check_copy_buffer, CheckCopyBufferError};
pub use self::dynamic_state::{check_dynamic_state_validity, CheckDynamicStateValidityError};
pub use self::fill_buffer::{check_fill_buffer, CheckFillBufferError};
pub use self::update_buffer::{check_update_buffer, CheckUpdateBufferError};
mod copy_buffer;
mod dynamic_state;
mod fill_buffer;
mod update_buffer;

View File

@ -0,0 +1,85 @@
// Copyright (c) 2017 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use buffer::BufferAccess;
use device::Device;
use device::DeviceOwned;
use VulkanObject;
/// Checks whether an update buffer command is valid.
///
/// # Panic
///
/// - Panics if the buffer was not created with `device`.
///
pub fn check_update_buffer<B, D>(device: &Device, buffer: &B, data: &D)
    -> Result<(), CheckUpdateBufferError>
    where B: ?Sized + BufferAccess,
          D: ?Sized
{
    let inner = buffer.inner();

    // The buffer must have been created with the device the command is
    // recorded on.
    assert_eq!(inner.buffer.device().internal_object(), device.internal_object());

    // Updating requires the transfer-destination usage.
    if !inner.buffer.usage_transfer_dest() {
        return Err(CheckUpdateBufferError::BufferMissingUsage);
    }

    // The destination offset must be a multiple of 4.
    if inner.offset % 4 != 0 {
        return Err(CheckUpdateBufferError::WrongAlignment);
    }

    // The number of bytes written is clamped to the buffer's size; it must be
    // a multiple of 4 and no larger than 64 KiB.
    let written = cmp::min(buffer.size(), mem::size_of_val(data));
    match written {
        s if s % 4 != 0 => Err(CheckUpdateBufferError::WrongAlignment),
        s if s > 65536 => Err(CheckUpdateBufferError::DataTooLarge),
        _ => Ok(()),
    }
}
/// Error that can happen when attempting to add an `update_buffer` command.
#[derive(Debug, Copy, Clone)]
pub enum CheckUpdateBufferError {
    /// The "transfer destination" usage must be enabled on the buffer.
    BufferMissingUsage,
    /// The data or size must be 4-bytes aligned.
    WrongAlignment,
    /// The data must not be larger than 64k bytes.
    DataTooLarge,
}

impl error::Error for CheckUpdateBufferError {
    /// Returns a short static description of the error.
    #[inline]
    fn description(&self) -> &str {
        match *self {
            CheckUpdateBufferError::BufferMissingUsage => {
                "the transfer destination usage must be enabled on the buffer"
            },
            CheckUpdateBufferError::WrongAlignment => {
                "the offset or size are not aligned to 4 bytes"
            },
            CheckUpdateBufferError::DataTooLarge => "data is too large",
        }
    }
}

impl fmt::Display for CheckUpdateBufferError {
    // Displays the same text as `description`.
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "{}", error::Error::description(self))
    }
}

View File

@ -17,13 +17,7 @@ use image::ImageAccess;
/// A collection of descriptor set objects.
pub unsafe trait DescriptorSetsCollection {
/// Returns the number of sets in the collection. Includes possibly empty sets.
///
/// In other words, this should be equal to the highest set number plus one.
fn num_sets(&self) -> usize;
/// Returns the descriptor set with the given id. Returns `None` if the set is empty.
fn descriptor_set(&self, set: usize) -> Option<&UnsafeDescriptorSet>;
fn into_vec(self) -> Vec<Box<DescriptorSet + Send + Sync>>;
/// Returns the number of descriptors in the set. Includes possibly empty descriptors.
///
@ -44,13 +38,8 @@ pub unsafe trait DescriptorSetsCollection {
unsafe impl DescriptorSetsCollection for () {
#[inline]
fn num_sets(&self) -> usize {
0
}
#[inline]
fn descriptor_set(&self, set: usize) -> Option<&UnsafeDescriptorSet> {
None
fn into_vec(self) -> Vec<Box<DescriptorSet + Send + Sync>> {
vec![]
}
#[inline]
@ -75,19 +64,11 @@ unsafe impl DescriptorSetsCollection for () {
}
unsafe impl<T> DescriptorSetsCollection for T
where T: DescriptorSet
where T: DescriptorSet + Send + Sync + 'static
{
#[inline]
fn num_sets(&self) -> usize {
1
}
#[inline]
fn descriptor_set(&self, set: usize) -> Option<&UnsafeDescriptorSet> {
match set {
0 => Some(self.inner()),
_ => None
}
fn into_vec(self) -> Vec<Box<DescriptorSet + Send + Sync>> {
vec![Box::new(self) as Box<_>]
}
#[inline]
@ -118,36 +99,16 @@ unsafe impl<T> DescriptorSetsCollection for T
}
macro_rules! impl_collection {
($first:ident $(, $others:ident)*) => (
unsafe impl<$first$(, $others)*> DescriptorSetsCollection for ($first, $($others),*)
where $first: DescriptorSet + DescriptorSetDesc
$(, $others: DescriptorSet + DescriptorSetDesc)*
($first:ident $(, $others:ident)+) => (
unsafe impl<$first$(, $others)+> DescriptorSetsCollection for ($first, $($others),+)
where $first: DescriptorSet + DescriptorSetDesc + Send + Sync + 'static
$(, $others: DescriptorSet + DescriptorSetDesc + Send + Sync + 'static)*
{
#[inline]
fn num_sets(&self) -> usize {
#![allow(non_snake_case)]
1 $( + {let $others=0;1})*
}
#[inline]
fn descriptor_set(&self, mut set: usize) -> Option<&UnsafeDescriptorSet> {
#![allow(non_snake_case)]
#![allow(unused_mut)] // For the `set` parameter.
if set == 0 {
return Some(self.0.inner());
}
let &(_, $(ref $others,)*) = self;
$(
set -= 1;
if set == 0 {
return Some($others.inner());
}
)*
None
fn into_vec(self) -> Vec<Box<DescriptorSet + Send + Sync>> {
let mut list = self.1.into_vec();
list.insert(0, Box::new(self.0) as Box<_>);
list
}
#[inline]
@ -219,10 +180,10 @@ macro_rules! impl_collection {
}
}
impl_collection!($($others),*);
impl_collection!($($others),+);
);
() => ();
($i:ident) => ();
}
impl_collection!(Z, Y, X, W, V, U, T, S, R, Q, P, O, N, M, L, K, J, I, H, G, F, E, D, C, B, A);

View File

@ -21,7 +21,6 @@ use std::u32;
use smallvec::SmallVec;
use buffer::BufferAccess;
use buffer::BufferInner;
use device::Device;
use device::DeviceOwned;
use descriptor::PipelineLayoutAbstract;
@ -1324,7 +1323,7 @@ unsafe impl<Mv, L, Rp, S> VertexSource<S> for GraphicsPipeline<Mv, L, Rp>
where Mv: VertexSource<S>
{
#[inline]
fn decode<'l>(&self, s: &'l S) -> (Vec<BufferInner<'l>>, usize, usize) {
fn decode(&self, s: S) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
self.vertex_definition.decode(s)
}
}

View File

@ -12,7 +12,6 @@ use std::fmt;
use std::sync::Arc;
use buffer::BufferAccess;
use buffer::BufferInner;
use format::Format;
use pipeline::vertex::VertexMemberTy;
use SafeDeref;
@ -109,12 +108,12 @@ pub unsafe trait VertexSource<L> {
// TODO: return error if problem
// TODO: better than a Vec
// TODO: return a struct instead
fn decode<'l>(&self, &'l L) -> (Vec<BufferInner<'l>>, usize, usize);
fn decode(&self, L) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize);
}
unsafe impl<L, T> VertexSource<L> for T where T: SafeDeref, T::Target: VertexSource<L> {
#[inline]
fn decode<'l>(&self, list: &'l L) -> (Vec<BufferInner<'l>>, usize, usize) {
fn decode(&self, list: L) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
(**self).decode(list)
}
}

View File

@ -13,7 +13,6 @@ use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use buffer::BufferAccess;
use buffer::BufferInner;
use buffer::TypedBufferAccess;
use pipeline::shader::ShaderInterfaceDef;
use pipeline::vertex::AttributeInfo;
@ -88,21 +87,25 @@ unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for OneVert
where T: Vertex, U: Vertex
{
#[inline]
fn decode<'l>(&self, source: &'l Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<BufferInner<'l>>, usize, usize) {
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
// FIXME: safety
assert_eq!(source.len(), 2);
let len = source[0].size() / mem::size_of::<T>();
let inst = source[0].size() / mem::size_of::<U>();
(vec![source[0].inner(), source[1].inner()], len, inst)
let s0 = source.remove(0);
let s1 = source.remove(0);
(vec![Box::new(s0) as Box<_>, Box::new(s1) as Box<_>], len, inst)
}
}
unsafe impl<'a, T, U, Bt, Bu> VertexSource<(Bt, Bu)> for OneVertexOneInstanceDefinition<T, U>
where T: Vertex, Bt: TypedBufferAccess<Content = [T]>,
U: Vertex, Bu: TypedBufferAccess<Content = [U]>
where T: Vertex, Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
U: Vertex, Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
{
#[inline]
fn decode<'l>(&self, source: &'l (Bt, Bu)) -> (Vec<BufferInner<'l>>, usize, usize) {
(vec![source.0.inner(), source.1.inner()], source.0.len(), source.1.len())
fn decode(&self, source: (Bt, Bu)) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
let s1l = source.0.len();
let s2l = source.1.len();
(vec![Box::new(source.0) as Box<_>, Box::new(source.1) as Box<_>], s1l, s2l)
}
}

View File

@ -14,7 +14,6 @@ use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use buffer::BufferAccess;
use buffer::BufferInner;
use buffer::TypedBufferAccess;
use pipeline::shader::ShaderInterfaceDef;
use pipeline::vertex::AttributeInfo;
@ -81,19 +80,20 @@ unsafe impl<V> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for SingleBuff
where V: Vertex
{
#[inline]
fn decode<'l>(&self, source: &'l Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<BufferInner<'l>>, usize, usize) {
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
// FIXME: safety
assert_eq!(source.len(), 1);
let len = source[0].size() / mem::size_of::<V>();
(vec![source[0].inner()], len, 1)
(vec![Box::new(source.remove(0))], len, 1)
}
}
unsafe impl<'a, B, V> VertexSource<B> for SingleBufferDefinition<V>
where B: TypedBufferAccess<Content = [V]>, V: Vertex
where B: TypedBufferAccess<Content = [V]> + Send + Sync + 'static, V: Vertex
{
#[inline]
fn decode<'l>(&self, source: &'l B) -> (Vec<BufferInner<'l>>, usize, usize) {
(vec![source.inner()], source.len(), 1)
fn decode(&self, source: B) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
let len = source.len();
(vec![Box::new(source) as Box<_>], len, 1)
}
}

View File

@ -13,7 +13,6 @@ use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use buffer::BufferAccess;
use buffer::BufferInner;
use buffer::TypedBufferAccess;
use pipeline::shader::ShaderInterfaceDef;
use pipeline::vertex::AttributeInfo;
@ -88,18 +87,18 @@ unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for TwoBuff
where T: Vertex, U: Vertex
{
#[inline]
fn decode<'l>(&self, source: &'l Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<BufferInner<'l>>, usize, usize) {
fn decode(&self, source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
unimplemented!() // FIXME: implement
}
}
unsafe impl<'a, T, U, Bt, Bu> VertexSource<(Bt, Bu)> for TwoBuffersDefinition<T, U>
where T: Vertex, Bt: TypedBufferAccess<Content = [T]>,
U: Vertex, Bu: TypedBufferAccess<Content = [U]>
where T: Vertex, Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
U: Vertex, Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
{
#[inline]
fn decode<'l>(&self, source: &'l (Bt, Bu)) -> (Vec<BufferInner<'l>>, usize, usize) {
fn decode(&self, source: (Bt, Bu)) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
let vertices = [source.0.len(), source.1.len()].iter().cloned().min().unwrap();
(vec![source.0.inner(), source.1.inner()], vertices, 1)
(vec![Box::new(source.0) as Box<_>, Box::new(source.1) as Box<_>], vertices, 1)
}
}