Mirror of https://github.com/vulkano-rs/vulkano.git
Synced 2024-11-23 07:15:31 +00:00

Commit 1818ac5bc7: Merge branch 'master' into initial-layout-trans
@@ -192,7 +192,7 @@ impl<T: ?Sized> CpuBufferPool<T> {
     }
 }

-impl<T, A> CpuBufferPool<T, A> where A: MemoryPool, T: 'static {
+impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
     /// Sets the capacity to `capacity`, or does nothing if the capacity is already higher.
     ///
     /// Since this can involve a memory allocation, an `OomError` can happen.

@@ -394,7 +394,7 @@ unsafe impl<T: ?Sized, A> Buffer for CpuBufferPoolSubbuffer<T, A>
 }

 unsafe impl<T: ?Sized, A> TypedBuffer for CpuBufferPoolSubbuffer<T, A>
-    where A: MemoryPool, T: 'static
+    where A: MemoryPool
 {
     type Content = T;
 }

@@ -460,7 +460,7 @@ unsafe impl<T: ?Sized, A> BufferAccess for CpuBufferPoolSubbuffer<T, A>
 }

 unsafe impl<T: ?Sized, A> TypedBufferAccess for CpuBufferPoolSubbuffer<T, A>
-    where A: MemoryPool, T: 'static + Copy + Clone
+    where A: MemoryPool
 {
     type Content = T;
 }
@@ -164,7 +164,7 @@ pub struct DeviceLocalBufferAccess<P>(P);

 unsafe impl<T: ?Sized, A> Buffer for Arc<DeviceLocalBuffer<T, A>>
     where T: 'static + Send + Sync,
-          A: MemoryPool
+          A: MemoryPool + 'static
 {
     type Access = DeviceLocalBufferAccess<Arc<DeviceLocalBuffer<T, A>>>;

@@ -181,7 +181,7 @@ unsafe impl<T: ?Sized, A> Buffer for Arc<DeviceLocalBuffer<T, A>>

 unsafe impl<T: ?Sized, A> TypedBuffer for Arc<DeviceLocalBuffer<T, A>>
     where T: 'static + Send + Sync,
-          A: MemoryPool
+          A: MemoryPool + 'static
 {
     type Content = T;
 }

@@ -189,7 +189,7 @@ unsafe impl<T: ?Sized, A> TypedBuffer for Arc<DeviceLocalBuffer<T, A>>
 unsafe impl<P, T: ?Sized, A> BufferAccess for DeviceLocalBufferAccess<P>
     where P: SafeDeref<Target = DeviceLocalBuffer<T, A>>,
           T: 'static + Send + Sync,
-          A: MemoryPool
+          A: MemoryPool + 'static
 {
     #[inline]
     fn inner(&self) -> BufferInner {

@@ -220,7 +220,7 @@ unsafe impl<P, T: ?Sized, A> BufferAccess for DeviceLocalBufferAccess<P>
 unsafe impl<P, T: ?Sized, A> TypedBufferAccess for DeviceLocalBufferAccess<P>
     where P: SafeDeref<Target = DeviceLocalBuffer<T, A>>,
           T: 'static + Send + Sync,
-          A: MemoryPool
+          A: MemoryPool + 'static
 {
     type Content = T;
 }

@@ -228,7 +228,7 @@ unsafe impl<P, T: ?Sized, A> TypedBufferAccess for DeviceLocalBufferAccess<P>
 unsafe impl<P, T: ?Sized, A> DeviceOwned for DeviceLocalBufferAccess<P>
     where P: SafeDeref<Target = DeviceLocalBuffer<T, A>>,
           T: 'static + Send + Sync,
-          A: MemoryPool
+          A: MemoryPool + 'static
 {
     #[inline]
     fn device(&self) -> &Arc<Device> {
@@ -18,12 +18,17 @@
 //! The buffer will be stored in device-local memory if possible
 //!

+use std::error;
+use std::fmt;
+use std::iter;
 use std::marker::PhantomData;
 use std::mem;
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
+use std::sync::atomic::Ordering;
 use smallvec::SmallVec;

+use buffer::CpuAccessibleBuffer;
 use buffer::sys::BufferCreationError;
 use buffer::sys::SparseLevel;
 use buffer::sys::UnsafeBuffer;

@@ -33,6 +38,14 @@ use buffer::traits::BufferInner;
 use buffer::traits::Buffer;
 use buffer::traits::TypedBuffer;
 use buffer::traits::TypedBufferAccess;
+use command_buffer::cb::AddCommand;
+use command_buffer::commands_raw::CmdCopyBuffer;
+use command_buffer::commands_raw::CmdCopyBufferError;
+use command_buffer::AutoCommandBufferBuilder;
+use command_buffer::CommandBuffer;
+use command_buffer::CommandBufferBuilder;
+use command_buffer::CommandBufferBuilderError;
+use command_buffer::CommandBufferExecFuture;
 use device::Device;
 use device::DeviceOwned;
 use device::Queue;

@@ -40,66 +53,208 @@ use instance::QueueFamily;
 use memory::pool::AllocLayout;
 use memory::pool::MemoryPool;
+use memory::pool::MemoryPoolAlloc;
-use memory::pool::StdMemoryPool;
+use memory::pool::StdMemoryPoolAlloc;
+use sync::NowFuture;
 use sync::Sharing;

 use OomError;

 /// Buffer that is written once then read for as long as it is alive.
-pub struct ImmutableBuffer<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
+// TODO: implement Debug
+pub struct ImmutableBuffer<T: ?Sized, A = StdMemoryPoolAlloc> {
+    // Inner content.
     inner: UnsafeBuffer,

-    memory: A::Alloc,
+    // Memory allocated for the buffer.
+    memory: A,

+    // True if the `ImmutableBufferInitialization` object was used by the GPU then dropped.
+    // This means that the `ImmutableBuffer` can be used as much as we want without any restriction.
+    initialized: AtomicBool,
+
+    // Queue families allowed to access this buffer.
     queue_families: SmallVec<[u32; 4]>,

-    started_reading: AtomicBool,

     // Necessary to have the appropriate template parameter.
     marker: PhantomData<Box<T>>,
 }

-impl<T> ImmutableBuffer<T> {
-    /// Builds a new buffer. Only allowed for sized data.
-    #[inline]
-    pub fn new<'a, I>(device: &Arc<Device>, usage: &Usage, queue_families: I)
-                      -> Result<Arc<ImmutableBuffer<T>>, OomError>
-        where I: IntoIterator<Item = QueueFamily<'a>>
+// TODO: make this prettier
+type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, ::command_buffer::cb::SubmitSyncLayer<::command_buffer::cb::AbstractStorageLayer<::command_buffer::cb::UnsafeCommandBuffer<Arc<::command_buffer::pool::standard::StandardCommandPool>>>>>;
+
+impl<T: ?Sized> ImmutableBuffer<T> {
+    /// Builds an `ImmutableBuffer` from some data.
+    ///
+    /// This function builds a memory-mapped intermediate buffer, writes the data to it, builds a
+    /// command buffer that copies from this intermediate buffer to the final buffer, and finally
+    /// submits the command buffer as a future.
+    ///
+    /// This function returns two objects: the newly-created buffer, and a future representing
+    /// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
+    /// either submit your operation after this future, or execute this future and wait for it to
+    /// be finished before submitting your own operation.
+    pub fn from_data<'a, I>(data: T, usage: &Usage, queue_families: I, queue: Arc<Queue>)
+        -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
+        where I: IntoIterator<Item = QueueFamily<'a>>,
+              T: 'static + Send + Sync + Sized,
     {
+        let source = CpuAccessibleBuffer::from_data(queue.device(), &Usage::transfer_source(),
+                                                    iter::once(queue.family()), data)?;
+        ImmutableBuffer::from_buffer(source, usage, queue_families, queue)
+    }
+
+    /// Builds an `ImmutableBuffer` that copies its data from another buffer.
+    ///
+    /// This function returns two objects: the newly-created buffer, and a future representing
+    /// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
+    /// either submit your operation after this future, or execute this future and wait for it to
+    /// be finished before submitting your own operation.
+    pub fn from_buffer<'a, B, I>(source: B, usage: &Usage, queue_families: I, queue: Arc<Queue>)
+        -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
+        where B: Buffer + TypedBuffer<Content = T> + DeviceOwned,   // TODO: remove + DeviceOwned once Buffer requires it
+              B::Access: 'static + Clone + Send + Sync,
+              I: IntoIterator<Item = QueueFamily<'a>>,
+              T: 'static + Send + Sync,
+    {
+        let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?;
+
+        let (buf, cb) = match ImmutableBuffer::from_buffer_with_builder(source, usage,
+                                                                        queue_families, cb)
+        {
+            Ok(v) => v,
+            Err(ImmutableBufferFromBufferWithBuilderError::OomError(err)) => return Err(err),
+            Err(ImmutableBufferFromBufferWithBuilderError::CommandBufferBuilderError(_)) => {
+                // Example errors that can trigger this: forbidden while inside render pass,
+                // ranges overlapping between buffers, missing usage in one of the buffers, etc.
+                // None of them can actually happen.
+                unreachable!()
+            },
+        };
+
+        let future = cb.build()?.execute(queue);
+        Ok((buf, future))
+    }
+
+    /// Builds an `ImmutableBuffer` that copies its data from another buffer.
+    pub fn from_buffer_with_builder<'a, B, I, Cb, O>(source: B, usage: &Usage, queue_families: I,
+                                                     builder: Cb)
+        -> Result<(Arc<ImmutableBuffer<T>>, O), ImmutableBufferFromBufferWithBuilderError>
+        where B: Buffer + TypedBuffer<Content = T> + DeviceOwned,   // TODO: remove + DeviceOwned once Buffer requires it
+              I: IntoIterator<Item = QueueFamily<'a>>,
+              Cb: CommandBufferBuilder +
+                  AddCommand<CmdCopyBuffer<B::Access, ImmutableBufferInitialization<T>>, Out = O>,
+    {
         unsafe {
-            ImmutableBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
+            // We automatically set `transfer_dest` to true in order to avoid annoying errors.
+            let actual_usage = Usage {
+                transfer_dest: true,
+                .. *usage
+            };
+
+            let (buffer, init) = ImmutableBuffer::raw(source.device().clone(), source.size(),
+                                                      &actual_usage, queue_families)?;
+
+            let builder = builder.copy_buffer(source, init)?;
+            Ok((buffer, builder))
         }
     }
 }

+impl<T> ImmutableBuffer<T> {
+    /// Builds a new buffer with uninitialized data. Only allowed for sized data.
+    ///
+    /// Returns two things: the buffer, and a special access that should be used for the initial
+    /// upload to the buffer.
+    ///
+    /// You will get an error if you try to use the buffer before using the initial upload access.
+    /// However this function doesn't check whether you actually used this initial upload to fill
+    /// the buffer like you're supposed to do.
+    ///
+    /// You will also get an error if you try to get exclusive access to the final buffer.
+    ///
+    /// # Safety
+    ///
+    /// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
+    ///   data, otherwise the content is undefined.
+    ///
     #[inline]
+    pub unsafe fn uninitialized<'a, I>(device: Arc<Device>, usage: &Usage, queue_families: I)
+        -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
         where I: IntoIterator<Item = QueueFamily<'a>>
     {
+        ImmutableBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
+    }
+}
+
 impl<T> ImmutableBuffer<[T]> {
     /// Builds a new buffer. Can be used for arrays.
+    pub fn from_iter<'a, D, I>(data: D, usage: &Usage, queue_families: I, queue: Arc<Queue>)
+        -> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), OomError>
+        where I: IntoIterator<Item = QueueFamily<'a>>,
+              D: ExactSizeIterator<Item = T>,
+              T: 'static + Send + Sync + Sized,
+    {
+        let source = CpuAccessibleBuffer::from_iter(queue.device(), &Usage::transfer_source(),
+                                                    iter::once(queue.family()), data)?;
+        ImmutableBuffer::from_buffer(source, usage, queue_families, queue)
+    }
+
+    /// Builds a new buffer with uninitialized data. Can be used for arrays.
+    ///
+    /// Returns two things: the buffer, and a special access that should be used for the initial
+    /// upload to the buffer.
+    ///
+    /// You will get an error if you try to use the buffer before using the initial upload access.
+    /// However this function doesn't check whether you actually used this initial upload to fill
+    /// the buffer like you're supposed to do.
+    ///
+    /// You will also get an error if you try to get exclusive access to the final buffer.
+    ///
+    /// # Safety
+    ///
+    /// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
+    ///   data, otherwise the content is undefined.
+    ///
     #[inline]
-    pub fn array<'a, I>(device: &Arc<Device>, len: usize, usage: &Usage, queue_families: I)
-                        -> Result<Arc<ImmutableBuffer<[T]>>, OomError>
+    pub unsafe fn uninitialized_array<'a, I>(device: Arc<Device>, len: usize, usage: &Usage,
+                                             queue_families: I)
+        -> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferInitialization<[T]>), OomError>
         where I: IntoIterator<Item = QueueFamily<'a>>
     {
-        unsafe {
-            ImmutableBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families)
-        }
+        ImmutableBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families)
     }
 }

 impl<T: ?Sized> ImmutableBuffer<T> {
-    /// Builds a new buffer without checking the size.
+    /// Builds a new buffer without checking the size and granting free access for the initial
+    /// upload.
     ///
+    /// Returns two things: the buffer, and a special access that should be used for the initial
+    /// upload to the buffer.
+    /// You will get an error if you try to use the buffer before using the initial upload access.
+    /// However this function doesn't check whether you used this initial upload to fill the buffer.
+    /// You will also get an error if you try to get exclusive access to the final buffer.
     ///
     /// # Safety
     ///
-    /// You must ensure that the size that you pass is correct for `T`.
+    /// - You must ensure that the size that you pass is correct for `T`.
+    /// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
+    ///   data.
     ///
-    pub unsafe fn raw<'a, I>(device: &Arc<Device>, size: usize, usage: &Usage, queue_families: I)
-                             -> Result<Arc<ImmutableBuffer<T>>, OomError>
+    #[inline]
+    pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: &Usage, queue_families: I)
+        -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
         where I: IntoIterator<Item = QueueFamily<'a>>
     {
-        let queue_families = queue_families.into_iter().map(|f| f.id())
-                                           .collect::<SmallVec<[u32; 4]>>();
+        let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
+        ImmutableBuffer::raw_impl(device, size, usage, queue_families)
+    }
+
+    // Internal implementation of `raw`. This is separated from `raw` so that it doesn't need to be
+    // inlined.
+    unsafe fn raw_impl(device: Arc<Device>, size: usize, usage: &Usage,
+                       queue_families: SmallVec<[u32; 4]>)
+        -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
+    {
         let (buffer, mem_reqs) = {
             let sharing = if queue_families.len() >= 2 {
                 Sharing::Concurrent(queue_families.iter().cloned())
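The new `from_data` path above is the headline change of this hunk: creation and upload become a single call that returns the buffer together with the upload future. A minimal usage sketch, adapted from the tests added further down in this commit (it assumes a `device`/`queue` pair and the `GpuFuture` trait in scope; the sketch itself is not part of the diff):

    // Sketch: upload `12u32` and wait for the copy to finish before using `buffer`.
    let (buffer, upload_future) =
        ImmutableBuffer::from_data(12u32, &Usage::all(),
                                   iter::once(queue.family()), queue.clone()).unwrap();
    upload_future.then_signal_fence_and_flush().unwrap();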
@@ -107,7 +262,7 @@ impl<T: ?Sized> ImmutableBuffer<T> {
                 Sharing::Exclusive
             };

-            match UnsafeBuffer::new(device, size, &usage, sharing, SparseLevel::none()) {
+            match UnsafeBuffer::new(&device, size, &usage, sharing, SparseLevel::none()) {
                 Ok(b) => b,
                 Err(BufferCreationError::OomError(err)) => return Err(err),
                 Err(_) => unreachable!()   // We don't use sparse binding, therefore the other

@@ -124,22 +279,29 @@ impl<T: ?Sized> ImmutableBuffer<T> {
            device_local.chain(any).next().unwrap()
        };

-        let mem = try!(MemoryPool::alloc(&Device::standard_pool(device), mem_ty,
+        let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
                                          mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
         debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
         try!(buffer.bind_memory(mem.memory(), mem.offset()));

-        Ok(Arc::new(ImmutableBuffer {
+        let final_buf = Arc::new(ImmutableBuffer {
            inner: buffer,
            memory: mem,
            queue_families: queue_families,
-            started_reading: AtomicBool::new(false),
+            initialized: AtomicBool::new(false),
            marker: PhantomData,
-        }))
+        });
+
+        let initialization = ImmutableBufferInitialization {
+            buffer: final_buf.clone(),
+            used: Arc::new(AtomicBool::new(false)),
+        };
+
+        Ok((final_buf, initialization))
     }
 }

-impl<T: ?Sized, A> ImmutableBuffer<T, A> where A: MemoryPool {
+impl<T: ?Sized, A> ImmutableBuffer<T, A> {
     /// Returns the device used to create this buffer.
     #[inline]
     pub fn device(&self) -> &Arc<Device> {

@@ -156,10 +318,7 @@ impl<T: ?Sized, A> ImmutableBuffer<T, A> where A: MemoryPool {
     }
 }

-// FIXME: wrong
-unsafe impl<T: ?Sized, A> Buffer for Arc<ImmutableBuffer<T, A>>
-    where T: 'static + Send + Sync, A: MemoryPool
-{
+unsafe impl<T: ?Sized, A> Buffer for Arc<ImmutableBuffer<T, A>> {
     type Access = Self;

     #[inline]

@@ -173,15 +332,11 @@ unsafe impl<T: ?Sized, A> Buffer for Arc<ImmutableBuffer<T, A>>
     }
 }

-unsafe impl<T: ?Sized, A> TypedBuffer for Arc<ImmutableBuffer<T, A>>
-    where T: 'static + Send + Sync, A: MemoryPool
-{
+unsafe impl<T: ?Sized, A> TypedBuffer for Arc<ImmutableBuffer<T, A>> {
     type Content = T;
 }

-unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A>
-    where T: 'static + Send + Sync, A: MemoryPool
-{
+unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A> {
     #[inline]
     fn inner(&self) -> BufferInner {
         BufferInner {
@@ -192,26 +347,295 @@ unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A>

     #[inline]
     fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> bool {
-        true // FIXME:
+        if exclusive_access {
+            return false;
+        }
+
+        if !self.initialized.load(Ordering::Relaxed) {
+            return false;
+        }
+
+        true
     }

     #[inline]
     unsafe fn increase_gpu_lock(&self) {
         // FIXME:
     }
 }

-unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBuffer<T, A>
-    where T: 'static + Send + Sync, A: MemoryPool
-{
+unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBuffer<T, A> {
     type Content = T;
 }

-unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBuffer<T, A>
-    where A: MemoryPool
-{
+unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBuffer<T, A> {
     #[inline]
     fn device(&self) -> &Arc<Device> {
         self.inner.device()
     }
 }

+/// Access to the immutable buffer that can be used for the initial upload.
+//#[derive(Debug)]      // TODO:
+pub struct ImmutableBufferInitialization<T: ?Sized, A = StdMemoryPoolAlloc> {
+    buffer: Arc<ImmutableBuffer<T, A>>,
+    used: Arc<AtomicBool>,
+}
+
+unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBufferInitialization<T, A> {
+    #[inline]
+    fn inner(&self) -> BufferInner {
+        self.buffer.inner()
+    }
+
+    #[inline]
+    fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> bool {
+        !self.used.compare_and_swap(false, true, Ordering::Relaxed)
+    }
+
+    #[inline]
+    unsafe fn increase_gpu_lock(&self) {
+        debug_assert!(self.used.load(Ordering::Relaxed));
+    }
+}
+
+unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBufferInitialization<T, A> {
+    type Content = T;
+}
+
+unsafe impl<T: ?Sized, A> Buffer for ImmutableBufferInitialization<T, A> {
+    type Access = Self;
+
+    #[inline]
+    fn access(self) -> Self {
+        self
+    }
+
+    #[inline]
+    fn size(&self) -> usize {
+        self.buffer.inner.size()
+    }
+}
+
+unsafe impl<T: ?Sized, A> TypedBuffer for ImmutableBufferInitialization<T, A> {
+    type Content = T;
+}
+
+unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBufferInitialization<T, A> {
+    #[inline]
+    fn device(&self) -> &Arc<Device> {
+        self.buffer.inner.device()
+    }
+}
+
+impl<T: ?Sized, A> Clone for ImmutableBufferInitialization<T, A> {
+    #[inline]
+    fn clone(&self) -> ImmutableBufferInitialization<T, A> {
+        ImmutableBufferInitialization {
+            buffer: self.buffer.clone(),
+            used: self.used.clone(),
+        }
+    }
+}
+
+impl<T: ?Sized, A> Drop for ImmutableBufferInitialization<T, A> {
+    #[inline]
+    fn drop(&mut self) {
+        if self.used.load(Ordering::Relaxed) {
+            self.buffer.initialized.store(true, Ordering::Relaxed);
+        }
+    }
+}
+
+/// Error that can happen when creating a `CmdCopyBuffer`.
+#[derive(Debug, Copy, Clone)]
+pub enum ImmutableBufferFromBufferWithBuilderError {
+    /// Out of memory.
+    OomError(OomError),
+    /// Error while adding the command to the builder.
+    CommandBufferBuilderError(CommandBufferBuilderError<CmdCopyBufferError>),
+}
+
+impl error::Error for ImmutableBufferFromBufferWithBuilderError {
+    #[inline]
+    fn description(&self) -> &str {
+        match *self {
+            ImmutableBufferFromBufferWithBuilderError::OomError(_) => {
+                "out of memory"
+            },
+            ImmutableBufferFromBufferWithBuilderError::CommandBufferBuilderError(_) => {
+                "error while adding the command to the builder"
+            },
+        }
+    }
+
+    #[inline]
+    fn cause(&self) -> Option<&error::Error> {
+        match *self {
+            ImmutableBufferFromBufferWithBuilderError::OomError(ref err) => Some(err),
+            ImmutableBufferFromBufferWithBuilderError::CommandBufferBuilderError(ref err) => Some(err),
+        }
+    }
+}
+
+impl fmt::Display for ImmutableBufferFromBufferWithBuilderError {
+    #[inline]
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(fmt, "{}", error::Error::description(self))
+    }
+}
+
+impl From<OomError> for ImmutableBufferFromBufferWithBuilderError {
+    #[inline]
+    fn from(err: OomError) -> ImmutableBufferFromBufferWithBuilderError {
+        ImmutableBufferFromBufferWithBuilderError::OomError(err)
+    }
+}
+
+impl From<CommandBufferBuilderError<CmdCopyBufferError>> for ImmutableBufferFromBufferWithBuilderError {
+    #[inline]
+    fn from(err: CommandBufferBuilderError<CmdCopyBufferError>) -> ImmutableBufferFromBufferWithBuilderError {
+        ImmutableBufferFromBufferWithBuilderError::CommandBufferBuilderError(err)
+    }
+}
+
 #[cfg(test)]
 mod tests {
+    use std::iter;
+    use buffer::cpu_access::CpuAccessibleBuffer;
+    use buffer::immutable::ImmutableBuffer;
+    use buffer::sys::Usage;
+    use command_buffer::AutoCommandBufferBuilder;
+    use command_buffer::CommandBuffer;
+    use command_buffer::CommandBufferBuilder;
+    use sync::GpuFuture;
+
+    #[test]
+    fn from_data_working() {
+        let (device, queue) = gfx_dev_and_queue!();
+
+        let (buffer, _) = ImmutableBuffer::from_data(12u32, &Usage::all(),
+                                                     iter::once(queue.family()),
+                                                     queue.clone()).unwrap();
+
+        let dest = CpuAccessibleBuffer::from_data(&device, &Usage::all(),
+                                                  iter::once(queue.family()), 0).unwrap();
+
+        let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .copy_buffer(buffer, dest.clone()).unwrap()
+            .build().unwrap()
+            .execute(queue.clone())
+            .then_signal_fence_and_flush().unwrap();
+
+        let dest_content = dest.read().unwrap();
+        assert_eq!(*dest_content, 12);
+    }
+
+    #[test]
+    fn from_iter_working() {
+        let (device, queue) = gfx_dev_and_queue!();
+
+        let (buffer, _) = ImmutableBuffer::from_iter((0 .. 512u32).map(|n| n * 2), &Usage::all(),
+                                                     iter::once(queue.family()),
+                                                     queue.clone()).unwrap();
+
+        let dest = CpuAccessibleBuffer::from_iter(&device, &Usage::all(),
+                                                  iter::once(queue.family()),
+                                                  (0 .. 512).map(|_| 0u32)).unwrap();
+
+        let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .copy_buffer(buffer, dest.clone()).unwrap()
+            .build().unwrap()
+            .execute(queue.clone())
+            .then_signal_fence_and_flush().unwrap();
+
+        let dest_content = dest.read().unwrap();
+        for (n, &v) in dest_content.iter().enumerate() {
+            assert_eq!(n * 2, v as usize);
+        }
+    }
+
+    #[test]
+    #[should_panic]     // TODO: check Result error instead of panicking
+    fn writing_forbidden() {
+        let (device, queue) = gfx_dev_and_queue!();
+
+        let (buffer, _) = ImmutableBuffer::from_data(12u32, &Usage::all(),
+                                                     iter::once(queue.family()),
+                                                     queue.clone()).unwrap();
+
+        let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .fill_buffer(buffer, 50).unwrap()
+            .build().unwrap()
+            .execute(queue.clone())
+            .then_signal_fence_and_flush().unwrap();
+    }
+
+    #[test]
+    #[should_panic]     // TODO: check Result error instead of panicking
+    fn read_uninitialized_forbidden() {
+        let (device, queue) = gfx_dev_and_queue!();
+
+        let (buffer, _) = unsafe {
+            ImmutableBuffer::<u32>::uninitialized(device.clone(), &Usage::all(),
+                                                  iter::once(queue.family())).unwrap()
+        };
+
+        let src = CpuAccessibleBuffer::from_data(&device, &Usage::all(),
+                                                 iter::once(queue.family()), 0).unwrap();
+
+        let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .copy_buffer(src, buffer).unwrap()
+            .build().unwrap()
+            .execute(queue.clone())
+            .then_signal_fence_and_flush().unwrap();
+    }
+
+    #[test]
+    fn init_then_read_same_cb() {
+        let (device, queue) = gfx_dev_and_queue!();
+
+        let (buffer, init) = unsafe {
+            ImmutableBuffer::<u32>::uninitialized(device.clone(), &Usage::all(),
+                                                  iter::once(queue.family())).unwrap()
+        };
+
+        let src = CpuAccessibleBuffer::from_data(&device, &Usage::all(),
+                                                 iter::once(queue.family()), 0).unwrap();
+
+        let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .copy_buffer(src.clone(), init).unwrap()
+            .copy_buffer(buffer, src.clone()).unwrap()
+            .build().unwrap()
+            .execute(queue.clone())
+            .then_signal_fence_and_flush().unwrap();
+    }
+
+    #[test]
+    #[ignore]       // TODO: doesn't work because the submit sync layer isn't properly implemented
+    fn init_then_read_same_future() {
+        let (device, queue) = gfx_dev_and_queue!();
+
+        let (buffer, init) = unsafe {
+            ImmutableBuffer::<u32>::uninitialized(device.clone(), &Usage::all(),
+                                                  iter::once(queue.family())).unwrap()
+        };
+
+        let src = CpuAccessibleBuffer::from_data(&device, &Usage::all(),
+                                                 iter::once(queue.family()), 0).unwrap();
+
+        let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .copy_buffer(src.clone(), init).unwrap()
+            .build().unwrap();
+
+        let cb2 = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
+            .copy_buffer(buffer, src.clone()).unwrap()
+            .build().unwrap();
+
+        let _ = cb1.execute(queue.clone())
+            .then_execute(queue.clone(), cb2)
+            .then_signal_fence_and_flush().unwrap();
+    }
+
+    // TODO: write tons of tests that try to exploit loopholes
+    // this isn't possible yet because checks aren't correctly implemented yet
 }
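The two atomic flags above form a small state machine: `used` makes the initialization handle single-use, and dropping a used handle is what flips `initialized`, which in turn lets `ImmutableBuffer::try_gpu_lock` accept non-exclusive accesses. A self-contained model of just that logic (standard library only, not from the commit; `compare_and_swap` matches the era of this code):

    use std::sync::atomic::{AtomicBool, Ordering};

    struct Flags {
        initialized: AtomicBool,
        used: AtomicBool,
    }

    impl Flags {
        // Mirrors ImmutableBufferInitialization::try_gpu_lock: succeeds exactly once.
        fn lock_init(&self) -> bool {
            !self.used.compare_and_swap(false, true, Ordering::Relaxed)
        }
        // Mirrors ImmutableBufferInitialization::drop.
        fn drop_init(&self) {
            if self.used.load(Ordering::Relaxed) {
                self.initialized.store(true, Ordering::Relaxed);
            }
        }
        // Mirrors ImmutableBuffer::try_gpu_lock: shared reads only, and only once initialized.
        fn lock_read(&self, exclusive: bool) -> bool {
            !exclusive && self.initialized.load(Ordering::Relaxed)
        }
    }

    fn main() {
        let f = Flags { initialized: AtomicBool::new(false), used: AtomicBool::new(false) };
        assert!(!f.lock_read(false));   // reading before the upload is refused
        assert!(f.lock_init());         // the first initialization lock succeeds...
        assert!(!f.lock_init());        // ...and the handle cannot be locked twice
        f.drop_init();                  // dropping the used handle marks the buffer initialized
        assert!(f.lock_read(false));    // shared reads are now allowed
        assert!(!f.lock_read(true));    // exclusive access is always refused
    }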
@@ -68,7 +68,7 @@ impl<T: ?Sized, B> Clone for BufferSlice<T, B>
 impl<T: ?Sized, B> BufferSlice<T, B> {
     #[inline]
     pub fn from_typed_buffer(r: B) -> BufferSlice<T, B>
-        where B: TypedBuffer<Content = T>, T: 'static
+        where B: TypedBuffer<Content = T>
     {
         let size = r.size();

@@ -82,7 +82,7 @@ impl<T: ?Sized, B> BufferSlice<T, B> {

     #[inline]
     pub fn from_typed_buffer_access(r: B) -> BufferSlice<T, B>
-        where B: TypedBufferAccess<Content = T>, T: 'static
+        where B: TypedBufferAccess<Content = T>
     {
         let size = r.size();

@@ -251,7 +251,7 @@ unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B> where B: BufferAcce
     }
 }

-unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B> where B: BufferAccess, T: 'static {
+unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B> where B: BufferAccess, {
     type Content = T;
 }
@@ -45,7 +45,6 @@ use VulkanPointers;
 use vk;

 /// Data storage in a GPU-accessible location.
-#[derive(Debug)]
 pub struct UnsafeBuffer {
     buffer: vk::Buffer,
     device: Arc<Device>,

@@ -258,6 +257,13 @@ unsafe impl DeviceOwned for UnsafeBuffer {
     }
 }

+impl fmt::Debug for UnsafeBuffer {
+    #[inline]
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(fmt, "<Vulkan buffer {:?}>", self.buffer)
+    }
+}
+
 impl Drop for UnsafeBuffer {
     #[inline]
     fn drop(&mut self) {
@@ -22,6 +22,7 @@ use VulkanObject;
 /// Trait for objects that represent either a buffer or a slice of a buffer.
 ///
 /// See also `TypedBuffer`.
+// TODO: require `DeviceOwned`
 pub unsafe trait Buffer {
     /// Object that represents a GPU access to the buffer.
     type Access: BufferAccess;

@@ -50,8 +51,7 @@ pub unsafe trait Buffer {
     /// Returns `None` if out of range.
     #[inline]
     fn slice<T>(self, range: Range<usize>) -> Option<BufferSlice<[T], Self>>
-        where Self: Sized + TypedBuffer<Content = [T]>,
-              T: 'static
+        where Self: Sized + TypedBuffer<Content = [T]>
     {
         BufferSlice::slice(self.into_buffer_slice(), range)
     }

@@ -74,8 +74,7 @@ pub unsafe trait Buffer {
     /// Returns `None` if out of range.
     #[inline]
     fn index<T>(self, index: usize) -> Option<BufferSlice<[T], Self>>
-        where Self: Sized + TypedBuffer<Content = [T]>,
-              T: 'static
+        where Self: Sized + TypedBuffer<Content = [T]>
     {
         self.slice(index .. (index + 1))
     }

@@ -84,7 +83,7 @@ pub unsafe trait Buffer {
 /// Extension trait for `Buffer`. Indicates the type of the content of the buffer.
 pub unsafe trait TypedBuffer: Buffer {
     /// The type of the content of the buffer.
-    type Content: ?Sized + 'static;
+    type Content: ?Sized;
 }

 /// Trait for objects that represent a way for the GPU to have access to a buffer or a slice of a

@@ -128,8 +127,7 @@ pub unsafe trait BufferAccess: DeviceOwned {
     /// Returns `None` if out of range.
     #[inline]
     fn slice<T>(&self, range: Range<usize>) -> Option<BufferSlice<[T], &Self>>
-        where Self: Sized + TypedBufferAccess<Content = [T]>,
-              T: 'static
+        where Self: Sized + TypedBufferAccess<Content = [T]>
     {
         BufferSlice::slice(self.as_buffer_slice(), range)
     }

@@ -152,8 +150,7 @@ pub unsafe trait BufferAccess: DeviceOwned {
     /// Returns `None` if out of range.
     #[inline]
     fn index<T>(&self, index: usize) -> Option<BufferSlice<[T], &Self>>
-        where Self: Sized + TypedBufferAccess<Content = [T]>,
-              T: 'static
+        where Self: Sized + TypedBufferAccess<Content = [T]>
     {
         self.slice(index .. (index + 1))
     }

@@ -306,7 +303,7 @@ unsafe impl<T> BufferAccess for T where T: SafeDeref, T::Target: BufferAccess {
 /// Extension trait for `BufferAccess`. Indicates the type of the content of the buffer.
 pub unsafe trait TypedBufferAccess: BufferAccess {
     /// The type of the content.
-    type Content: ?Sized + 'static;
+    type Content: ?Sized;
 }

 unsafe impl<T> TypedBufferAccess for T where T: SafeDeref, T::Target: TypedBufferAccess {
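Dropping the `T: 'static` bounds does not change how the slicing helpers are called. As a reminder of their contract (a fragment, not from the commit; `buf` stands for any value implementing `TypedBufferAccess<Content = [u32]>`):

    // `slice` returns None when the range is out of bounds.
    if let Some(first_four) = buf.slice(0 .. 4) {
        // `first_four` is a BufferSlice<[u32], &_> covering elements 0..4.
    }
    // `index(n)` is shorthand for `slice(n .. n + 1)`.
    let _fifth = buf.index(4);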
@@ -31,8 +31,9 @@
 //!     .. Usage::none()
 //! };
 //!
-//! let buffer = ImmutableBuffer::<[u32]>::array(&device, 128, &usage,
-//!                                              Some(queue.family())).unwrap();
+//! let (buffer, _future) = ImmutableBuffer::<[u32]>::from_iter((0..128).map(|n| n), &usage,
+//!                                                             Some(queue.family()),
+//!                                                             queue.clone()).unwrap();
 //! let _view = BufferView::new(buffer, format::R32Uint).unwrap();
 //! ```

@@ -208,6 +209,15 @@ unsafe impl<F, B> DeviceOwned for BufferView<F, B>
     }
 }

+impl<F, B> fmt::Debug for BufferView<F, B> where B: BufferAccess + fmt::Debug {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        fmt.debug_struct("BufferView")
+            .field("raw", &self.view)
+            .field("buffer", &self.buffer)
+            .finish()
+    }
+}
+
 impl<F, B> Drop for BufferView<F, B> where B: BufferAccess {
     #[inline]
     fn drop(&mut self) {

@@ -326,8 +336,8 @@ mod tests {
             .. Usage::none()
         };

-        let buffer = ImmutableBuffer::<[[u8; 4]]>::array(&device, 128, &usage,
-                                                         Some(queue.family())).unwrap();
+        let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), &usage,
+                                                                  Some(queue.family()), queue.clone()).unwrap();
         let view = BufferView::new(buffer, format::R8G8B8A8Unorm).unwrap();

         assert!(view.uniform_texel_buffer());

@@ -343,8 +353,9 @@ mod tests {
             .. Usage::none()
         };

-        let buffer = ImmutableBuffer::<[[u8; 4]]>::array(&device, 128, &usage,
-                                                         Some(queue.family())).unwrap();
+        let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), &usage,
+                                                                  Some(queue.family()),
+                                                                  queue.clone()).unwrap();
         let view = BufferView::new(buffer, format::R8G8B8A8Unorm).unwrap();

         assert!(view.storage_texel_buffer());

@@ -360,8 +371,9 @@ mod tests {
             .. Usage::none()
         };

-        let buffer = ImmutableBuffer::<[u32]>::array(&device, 128, &usage,
-                                                     Some(queue.family())).unwrap();
+        let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0..128).map(|_| 0), &usage,
+                                                              Some(queue.family()),
+                                                              queue.clone()).unwrap();
         let view = BufferView::new(buffer, format::R32Uint).unwrap();

         assert!(view.storage_texel_buffer());

@@ -373,8 +385,10 @@ mod tests {
         // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
         let (device, queue) = gfx_dev_and_queue!();

-        let buffer = ImmutableBuffer::<[[u8; 4]]>::array(&device, 128, &Usage::none(),
-                                                         Some(queue.family())).unwrap();
+        let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]),
+                                                                  &Usage::none(),
+                                                                  Some(queue.family()),
+                                                                  queue.clone()).unwrap();

         match BufferView::new(buffer, format::R8G8B8A8Unorm) {
             Err(BufferViewCreationError::WrongBufferUsage) => (),

@@ -392,8 +406,9 @@ mod tests {
             .. Usage::none()
         };

-        let buffer = ImmutableBuffer::<[[f64; 4]]>::array(&device, 128, &usage,
-                                                          Some(queue.family())).unwrap();
+        let (buffer, _) = ImmutableBuffer::<[[f64; 4]]>::from_iter((0..128).map(|_| [0.0; 4]),
+                                                                   &usage, Some(queue.family()),
+                                                                   queue.clone()).unwrap();

         // TODO: what if R64G64B64A64Sfloat is supported?
         match BufferView::new(buffer, format::R64G64B64A64Sfloat) {
@@ -57,7 +57,7 @@ impl<S, P> CmdBindDescriptorSets<S, P>
     pub fn new(graphics: bool, pipeline_layout: P, sets: S)
                -> Result<CmdBindDescriptorSets<S, P>, CmdBindDescriptorSetsError>
     {
-        if !PipelineLayoutSetsCompatible::is_compatible(pipeline_layout.desc(), &sets) {
+        if !PipelineLayoutSetsCompatible::is_compatible(&pipeline_layout, &sets) {
             return Err(CmdBindDescriptorSetsError::IncompatibleSets);
         }

@@ -43,7 +43,7 @@ impl<Pc, Pl> CmdPushConstants<Pc, Pl>
     pub fn new(pipeline_layout: Pl, push_constants: Pc)
                -> Result<CmdPushConstants<Pc, Pl>, CmdPushConstantsError>
     {
-        if !PipelineLayoutPushConstantsCompatible::is_compatible(pipeline_layout.desc(), &push_constants) {
+        if !PipelineLayoutPushConstantsCompatible::is_compatible(&pipeline_layout, &push_constants) {
             return Err(CmdPushConstantsError::IncompatibleData);
         }

@@ -78,8 +78,8 @@ unsafe impl<'a, P, Pc, Pl> AddCommand<&'a CmdPushConstants<Pc, Pl>> for UnsafeCo

         let data_raw = &command.push_constants as *const Pc as *const u8;

-        for num_range in 0 .. command.pipeline_layout.desc().num_push_constants_ranges() {
-            let range = match command.pipeline_layout.desc().push_constants_range(num_range) {
+        for num_range in 0 .. command.pipeline_layout.num_push_constants_ranges() {
+            let range = match command.pipeline_layout.push_constants_range(num_range) {
                 Some(r) => r,
                 None => continue
             };
@@ -77,6 +77,7 @@
 pub use self::auto::AutoCommandBufferBuilder;
 pub use self::builder::CommandAddError;
 pub use self::builder::CommandBufferBuilder;
+pub use self::builder::CommandBufferBuilderError;
 pub use self::traits::CommandBuffer;
 pub use self::traits::CommandBufferBuild;
 pub use self::traits::CommandBufferExecFuture;

@@ -97,7 +97,7 @@ impl<'a> SubmitCommandBufferBuilder<'a> {
 /// builder.submit(&queue).unwrap();
 ///
 /// // We must not destroy the fence before it is signaled.
-/// fence.wait(Duration::from_secs(5)).unwrap();
+/// fence.wait(Some(Duration::from_secs(5))).unwrap();
 /// }
 /// ```
 ///

@@ -324,7 +324,7 @@ mod tests {
         builder.set_fence_signal(&fence);

         builder.submit(&queue).unwrap();
-        fence.wait(Duration::from_secs(10)).unwrap();
+        fence.wait(Some(Duration::from_secs(5))).unwrap();
         assert!(fence.ready().unwrap());
     }
 }
@@ -155,9 +155,9 @@ impl<L> SimpleDescriptorSetBuilder<L, ()> where L: PipelineLayoutAbstract {
     /// - Panics if the set id is out of range.
     ///
     pub fn new(layout: L, set_id: usize) -> SimpleDescriptorSetBuilder<L, ()> {
-        assert!(layout.desc().num_sets() > set_id);
+        assert!(layout.num_sets() > set_id);

-        let cap = layout.desc().num_bindings_in_set(set_id).unwrap_or(0);
+        let cap = layout.num_bindings_in_set(set_id).unwrap_or(0);

         SimpleDescriptorSetBuilder {
             layout: layout,

@@ -210,9 +210,9 @@ unsafe impl<L, R, T> SimpleDescriptorSetBufferExt<L, R> for T
     {
         let buffer = self.access();

-        let (set_id, binding_id) = i.layout.desc().descriptor_by_name(name).unwrap();   // TODO: Result instead
+        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap();   // TODO: Result instead
         assert_eq!(set_id, i.set_id);   // TODO: Result instead
-        let desc = i.layout.desc().descriptor(set_id, binding_id).unwrap();   // TODO: Result instead
+        let desc = i.layout.descriptor(set_id, binding_id).unwrap();   // TODO: Result instead

         assert!(desc.array_count == 1);   // not implemented
         i.writes.push(match desc.ty.ty().unwrap() {

@@ -260,9 +260,9 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for T
     {
         let image_view = self.access();

-        let (set_id, binding_id) = i.layout.desc().descriptor_by_name(name).unwrap();   // TODO: Result instead
+        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap();   // TODO: Result instead
         assert_eq!(set_id, i.set_id);   // TODO: Result instead
-        let desc = i.layout.desc().descriptor(set_id, binding_id).unwrap();   // TODO: Result instead
+        let desc = i.layout.descriptor(set_id, binding_id).unwrap();   // TODO: Result instead

         assert!(desc.array_count == 1);   // not implemented
         i.writes.push(match desc.ty.ty().unwrap() {

@@ -308,9 +308,9 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for (T, Arc<Sampler>)
     {
         let image_view = self.0.access();

-        let (set_id, binding_id) = i.layout.desc().descriptor_by_name(name).unwrap();   // TODO: Result instead
+        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap();   // TODO: Result instead
         assert_eq!(set_id, i.set_id);   // TODO: Result instead
-        let desc = i.layout.desc().descriptor(set_id, binding_id).unwrap();   // TODO: Result instead
+        let desc = i.layout.descriptor(set_id, binding_id).unwrap();   // TODO: Result instead

         assert!(desc.array_count == 1);   // not implemented
         i.writes.push(match desc.ty.ty().unwrap() {

@@ -349,9 +349,9 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for Vec<(T, Arc<Sampler>)
     fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
               -> SimpleDescriptorSetBuilder<L, Self::Out>
     {
-        let (set_id, binding_id) = i.layout.desc().descriptor_by_name(name).unwrap();   // TODO: Result instead
+        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap();   // TODO: Result instead
         assert_eq!(set_id, i.set_id);   // TODO: Result instead
-        let desc = i.layout.desc().descriptor(set_id, binding_id).unwrap();   // TODO: Result instead
+        let desc = i.layout.descriptor(set_id, binding_id).unwrap();   // TODO: Result instead

         assert_eq!(desc.array_count as usize, self.len());   // not implemented
@@ -433,6 +433,15 @@ unsafe impl DeviceOwned for UnsafeDescriptorPool {
     }
 }

+impl fmt::Debug for UnsafeDescriptorPool {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        fmt.debug_struct("UnsafeDescriptorPool")
+            .field("raw", &self.pool)
+            .field("device", &self.device)
+            .finish()
+    }
+}
+
 impl Drop for UnsafeDescriptorPool {
     #[inline]
     fn drop(&mut self) {

@@ -484,6 +493,7 @@ impl fmt::Display for DescriptorPoolAllocError {
 }

 /// Iterator to the descriptor sets allocated from an unsafe descriptor pool.
+#[derive(Debug)]
 pub struct UnsafeDescriptorPoolAllocIter {
     sets: VecIntoIter<vk::DescriptorSet>,
 }

@@ -708,6 +718,12 @@ unsafe impl VulkanObject for UnsafeDescriptorSet {
     }
 }

+impl fmt::Debug for UnsafeDescriptorSet {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(fmt, "<Vulkan descriptor set {:?}>", self.set)
+    }
+}
+
 /// Represents a single write entry to a descriptor set.
 ///
 /// Use the various constructors to build a `DescriptorWrite`. While it is safe to build a

@@ -7,6 +7,7 @@
 // notice may not be copied, modified, or distributed except
 // according to those terms.

+use std::fmt;
 use std::mem;
 use std::ptr;
 use std::sync::Arc;

@@ -109,6 +110,15 @@ unsafe impl DeviceOwned for UnsafeDescriptorSetLayout {
     }
 }

+impl fmt::Debug for UnsafeDescriptorSetLayout {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        fmt.debug_struct("UnsafeDescriptorSetLayout")
+            .field("raw", &self.layout)
+            .field("device", &self.device)
+            .finish()
+    }
+}
+
 unsafe impl VulkanObject for UnsafeDescriptorSetLayout {
     type Object = vk::DescriptorSetLayout;

@@ -21,6 +21,7 @@ use VulkanObject;
 use VulkanPointers;
 use vk;

+use descriptor::descriptor::DescriptorDesc;
 use descriptor::descriptor::ShaderStages;
 use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
 use descriptor::pipeline_layout::PipelineLayoutDesc;

@@ -32,7 +33,7 @@ use device::DeviceOwned;

 /// Wrapper around the `PipelineLayout` Vulkan object. Describes to the Vulkan implementation the
 /// descriptor sets and push constants available to your shaders
-pub struct PipelineLayout<L = Box<PipelineLayoutDescNames + Send + Sync>> {
+pub struct PipelineLayout<L> {
     device: Arc<Device>,
     layout: vk::PipelineLayout,
     layouts: SmallVec<[Arc<UnsafeDescriptorSetLayout>; 16]>,

@@ -174,13 +175,42 @@ unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D> where D: PipelineLay
     }

     #[inline]
-    fn desc(&self) -> &PipelineLayoutDescNames {
-        &self.desc
+    fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>> {
+        self.layouts.get(index)
     }
 }

+unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D> where D: PipelineLayoutDesc {
+    #[inline]
+    fn num_sets(&self) -> usize {
+        self.desc.num_sets()
+    }
+
     #[inline]
-    fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>> {
-        self.layouts.get(index)
+    fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
+        self.desc.num_bindings_in_set(set)
     }
+
+    #[inline]
+    fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
+        self.desc.descriptor(set, binding)
+    }
+
+    #[inline]
+    fn num_push_constants_ranges(&self) -> usize {
+        self.desc.num_push_constants_ranges()
+    }
+
+    #[inline]
+    fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
+        self.desc.push_constants_range(num)
+    }
+}
+
+unsafe impl<D> PipelineLayoutDescNames for PipelineLayout<D> where D: PipelineLayoutDescNames {
+    #[inline]
+    fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
+        self.desc.descriptor_by_name(name)
+    }
+}

@@ -191,6 +221,16 @@ unsafe impl<D> DeviceOwned for PipelineLayout<D> {
     }
 }

+impl<D> fmt::Debug for PipelineLayout<D> where D: fmt::Debug {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        fmt.debug_struct("PipelineLayout")
+            .field("raw", &self.layout)
+            .field("device", &self.device)
+            .field("desc", &self.desc)
+            .finish()
+    }
+}
+
 impl<L> Drop for PipelineLayout<L> {
     #[inline]
     fn drop(&mut self) {

@@ -25,7 +25,8 @@ use device::DeviceOwned;
 use SafeDeref;

 /// Trait for objects that describe the layout of the descriptors and push constants of a pipeline.
-pub unsafe trait PipelineLayoutAbstract: DeviceOwned {
+// TODO: meh for PipelineLayoutDescNames ; the `Names` thing shouldn't be mandatory
+pub unsafe trait PipelineLayoutAbstract: PipelineLayoutDescNames + DeviceOwned {
     /// Returns an opaque object that allows internal access to the pipeline layout.
     ///
     /// Can be obtained by calling `PipelineLayoutAbstract::sys()` on the pipeline layout.

@@ -33,12 +34,6 @@ pub unsafe trait PipelineLayoutAbstract: DeviceOwned {
     /// > **Note**: This is an internal function that you normally don't need to call.
     fn sys(&self) -> PipelineLayoutSys;

-    /// Returns the description of the pipeline layout.
-    ///
-    /// Can be obtained by calling `PipelineLayoutAbstract::desc()` on the pipeline layout.
-    // TODO: meh for `PipelineLayoutDescNames instead of `PipelineLayoutDesc`
-    fn desc(&self) -> &PipelineLayoutDescNames;
-
     /// Returns the `UnsafeDescriptorSetLayout` object of the specified set index.
     ///
     /// Returns `None` if out of range or if the set is empty for this index.

@@ -51,11 +46,6 @@ unsafe impl<T> PipelineLayoutAbstract for T where T: SafeDeref, T::Target: Pipel
         (**self).sys()
     }

-    #[inline]
-    fn desc(&self) -> &PipelineLayoutDescNames {
-        (**self).desc()
-    }
-
     #[inline]
     fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>> {
         (**self).descriptor_set_layout(index)
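The net effect of this refactoring is that a `PipelineLayoutAbstract` value now answers description queries directly, which is exactly the change applied to the callers earlier in this commit (the descriptor-set builder, `CmdBindDescriptorSets`, `CmdPushConstants`). Roughly, for a hypothetical `layout: impl PipelineLayoutAbstract` (a sketch, not from the diff; the descriptor name is made up):

    // Before this commit, the description lived behind an extra indirection:
    // let num = layout.desc().num_sets();

    // After it, PipelineLayoutAbstract implies PipelineLayoutDescNames:
    let num = layout.num_sets();
    let loc = layout.descriptor_by_name("u_matrices");   // hypothetical binding name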
@@ -408,7 +408,7 @@ impl Device {
 impl fmt::Debug for Device {
     #[inline]
     fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        write!(fmt, "<Vulkan device>")
+        write!(fmt, "<Vulkan device {:?}>", self.device)
     }
 }

@@ -119,6 +119,7 @@ use vk;
 /// };
 /// # }
 /// ```
+#[derive(Debug)]
 pub struct Framebuffer<Rp, A> {
     device: Arc<Device>,
     render_pass: Rp,
@@ -393,6 +393,16 @@ unsafe impl<D> DeviceOwned for RenderPass<D> {
     }
 }

+impl<D> fmt::Debug for RenderPass<D> where D: fmt::Debug {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        fmt.debug_struct("RenderPass")
+            .field("raw", &self.render_pass)
+            .field("device", &self.device)
+            .field("desc", &self.desc)
+            .finish()
+    }
+}
+
 impl<D> Drop for RenderPass<D> {
     #[inline]
     fn drop(&mut self) {

@@ -51,7 +51,6 @@ use vk;
 /// - The usage must be manually enforced.
 /// - The image layout must be manually enforced and transitionned.
 ///
-#[derive(Debug)]
 pub struct UnsafeImage {
     image: vk::Image,
     device: Arc<Device>,

@@ -680,6 +679,13 @@ unsafe impl VulkanObject for UnsafeImage {
     }
 }

+impl fmt::Debug for UnsafeImage {
+    #[inline]
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(fmt, "<Vulkan image {:?}>", self.image)
+    }
+}
+
 impl Drop for UnsafeImage {
     #[inline]
     fn drop(&mut self) {

@@ -793,7 +799,6 @@ pub struct LinearLayout {
     pub depth_pitch: usize,
 }

-#[derive(Debug)]
 pub struct UnsafeImageView {
     view: vk::ImageView,
     device: Arc<Device>,

@@ -950,6 +955,13 @@ unsafe impl VulkanObject for UnsafeImageView {
     }
 }

+impl fmt::Debug for UnsafeImageView {
+    #[inline]
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(fmt, "<Vulkan image view {:?}>", self.view)
+    }
+}
+
 impl Drop for UnsafeImageView {
     #[inline]
     fn drop(&mut self) {
@@ -311,7 +311,7 @@ impl Instance {
 impl fmt::Debug for Instance {
     #[inline]
     fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        write!(fmt, "<Vulkan instance>")
+        write!(fmt, "<Vulkan instance {:?}>", self.instance)
     }
 }

@@ -7,6 +7,7 @@
 // notice may not be copied, modified, or distributed except
 // according to those terms.

+use std::fmt;
 use std::mem;
 use std::ptr;
 use std::ops::Deref;

@@ -17,9 +18,9 @@ use std::sync::Arc;

 use instance::MemoryType;
 use device::Device;
+use device::DeviceOwned;
 use memory::Content;
 use OomError;
-use SafeDeref;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -38,17 +39,16 @@ use vk;
 /// let mem_ty = device.physical_device().memory_types().next().unwrap();
 ///
 /// // Allocates 1kB of memory.
-/// let memory = DeviceMemory::alloc(&device, mem_ty, 1024).unwrap();
+/// let memory = DeviceMemory::alloc(device.clone(), mem_ty, 1024).unwrap();
 /// ```
-#[derive(Debug)]
-pub struct DeviceMemory<D = Arc<Device>> where D: SafeDeref<Target = Device> {
+pub struct DeviceMemory {
     memory: vk::DeviceMemory,
-    device: D,
+    device: Arc<Device>,
     size: usize,
     memory_type_index: u32,
 }

-impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
+impl DeviceMemory {
     /// Allocates a chunk of memory from the device.
     ///
     /// Some platforms may have a limit on the maximum size of a single allocation. For example,

@@ -60,11 +60,9 @@ impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
     /// - Panics if `memory_type` doesn't belong to the same physical device as `device`.
     ///
     // TODO: VK_ERROR_TOO_MANY_OBJECTS error
-    // TODO: remove that `D` generic and use `Arc<Device>`
     #[inline]
-    pub fn alloc(device: &D, memory_type: MemoryType, size: usize)
-                 -> Result<DeviceMemory<D>, OomError>
-        where D: Clone
+    pub fn alloc(device: Arc<Device>, memory_type: MemoryType, size: usize)
+                 -> Result<DeviceMemory, OomError>
     {
         assert!(size >= 1);
         assert_eq!(device.physical_device().internal_object(),

@@ -74,9 +72,9 @@ impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
             return Err(OomError::OutOfDeviceMemory);
         }

-        let vk = device.pointers();
-
         let memory = unsafe {
+            let vk = device.pointers();
+
             let infos = vk::MemoryAllocateInfo {
                 sType: vk::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
                 pNext: ptr::null(),

@@ -92,7 +90,7 @@ impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {

         Ok(DeviceMemory {
             memory: memory,
-            device: device.clone(),
+            device: device,
             size: size,
             memory_type_index: memory_type.id(),
         })

@@ -105,14 +103,13 @@ impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
     /// - Panics if `memory_type` doesn't belong to the same physical device as `device`.
     /// - Panics if the memory type is not host-visible.
     ///
-    pub fn alloc_and_map(device: &D, memory_type: MemoryType, size: usize)
-                         -> Result<MappedDeviceMemory<D>, OomError>
-        where D: Clone
+    pub fn alloc_and_map(device: Arc<Device>, memory_type: MemoryType, size: usize)
+                         -> Result<MappedDeviceMemory, OomError>
     {
         let vk = device.pointers();

         assert!(memory_type.is_host_visible());
-        let mem = try!(DeviceMemory::alloc(device, memory_type, size));
+        let mem = try!(DeviceMemory::alloc(device.clone(), memory_type, size));

         let coherent = memory_type.is_host_coherent();

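With the `D` generic gone, `alloc` and `alloc_and_map` take the `Arc<Device>` by value and store it inside the returned object, so a caller that still needs the device afterwards passes a clone. A sketch under those assumptions (`device: Arc<Device>`, `mem_ty` a valid memory type; not from the diff):

    let memory = DeviceMemory::alloc(device.clone(), mem_ty, 1024).unwrap();
    assert_eq!(memory.size(), 1024);
    // The allocation now owns its own reference to the device:
    let _dev = memory.device();   // via the new DeviceOwned impl below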
@ -142,15 +139,26 @@ impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
|
||||
pub fn size(&self) -> usize {
|
||||
self.size
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the device associated with this allocation.
|
||||
unsafe impl DeviceOwned for DeviceMemory {
|
||||
#[inline]
|
||||
pub fn device(&self) -> &Device {
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
&self.device
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<D> VulkanObject for DeviceMemory<D> where D: SafeDeref<Target = Device> {
|
||||
impl fmt::Debug for DeviceMemory {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("DeviceMemory")
|
||||
.field("device", &*self.device)
|
||||
.field("memory_type", &self.memory_type())
|
||||
.field("size", &self.size)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl VulkanObject for DeviceMemory {
|
||||
type Object = vk::DeviceMemory;
|
||||
|
||||
#[inline]
|
||||
@ -159,13 +167,12 @@ unsafe impl<D> VulkanObject for DeviceMemory<D> where D: SafeDeref<Target = Devi
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Drop for DeviceMemory<D> where D: SafeDeref<Target = Device> {
|
||||
impl Drop for DeviceMemory {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
let device = self.device();
|
||||
let vk = device.pointers();
|
||||
vk.FreeMemory(device.internal_object(), self.memory, ptr::null());
|
||||
let vk = self.device.pointers();
|
||||
vk.FreeMemory(self.device.internal_object(), self.memory, ptr::null());
|
||||
}
|
||||
}
|
||||
}
|
||||
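Editor's note: `DeviceMemory` is now a concrete, non-generic type; the inherent `device()` accessor becomes the `DeviceOwned` trait method, and a manual `fmt::Debug` impl replaces the old derive. A hedged sketch of the caller-side effect (the trait must be in scope; the public path is assumed to be `vulkano::device::DeviceOwned`):

    use vulkano::device::DeviceOwned;

    let memory = DeviceMemory::alloc(device.clone(), mem_ty, 256).expect("failed to allocate memory");
    let dev: &Arc<Device> = memory.device(); // via the new DeviceOwned impl
    println!("{:?}", memory);                // via the new fmt::Debug impl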
@ -190,7 +197,7 @@ impl<D> Drop for DeviceMemory<D> where D: SafeDeref<Target = Device> {
/// .next().unwrap(); // Vk specs guarantee that this can't fail
///
/// // Allocates 1kB of memory.
/// let memory = DeviceMemory::alloc_and_map(&device, mem_ty, 1024).unwrap();
/// let memory = DeviceMemory::alloc_and_map(device.clone(), mem_ty, 1024).unwrap();
///
/// // Get access to the content. Note that this is very unsafe for two reasons: 1) the content is
/// // uninitialized, and 2) the access is unsynchronized.
@ -199,19 +206,29 @@ impl<D> Drop for DeviceMemory<D> where D: SafeDeref<Target = Device> {
/// content[12] = 54; // `content` derefs to a `&[u8]` or a `&mut [u8]`
/// }
/// ```
#[derive(Debug)]
pub struct MappedDeviceMemory<D = Arc<Device>> where D: SafeDeref<Target = Device> {
    memory: DeviceMemory<D>,
pub struct MappedDeviceMemory {
    memory: DeviceMemory,
    pointer: *mut c_void,
    coherent: bool,
}

impl<D> MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
    /// Returns the underlying `DeviceMemory`.
    // TODO: impl AsRef instead?
    #[inline]
    pub fn memory(&self) -> &DeviceMemory<D> {
        &self.memory
// Note that `MappedDeviceMemory` doesn't implement `Drop`, as we don't need to unmap memory before
// freeing it.
//
// Vulkan specs, documentation of `vkFreeMemory`:
// > If a memory object is mapped at the time it is freed, it is implicitly unmapped.
//

impl MappedDeviceMemory {
    /// Unmaps the memory. It will no longer be accessible from the CPU.
    pub fn unmap(self) -> DeviceMemory {
        unsafe {
            let device = self.memory.device();
            let vk = device.pointers();
            vk.UnmapMemory(device.internal_object(), self.memory.memory);
        }

        self.memory
    }
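Editor's note: unmapping is no longer tied to `Drop`. The new consuming `unmap` hands back the plain `DeviceMemory`, and memory that is still mapped when freed is implicitly unmapped by Vulkan (quoted above). A minimal sketch, assuming a host-visible `mem_ty`:

    let mapped = DeviceMemory::alloc_and_map(device.clone(), mem_ty, 1024).expect("failed to allocate memory");
    // ... access the contents through `read_write` here ...
    let plain: DeviceMemory = mapped.unmap(); // explicit unmap; dropping `plain` frees the allocation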

    /// Gives access to the content of the memory.
@ -229,8 +246,8 @@ impl<D> MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
    /// the `MappedDeviceMemory`.
    ///
    #[inline]
    pub unsafe fn read_write<T: ?Sized>(&self, range: Range<usize>) -> CpuAccess<T, D>
        where T: Content + 'static
    pub unsafe fn read_write<T: ?Sized>(&self, range: Range<usize>) -> CpuAccess<T>
        where T: Content
    {
        let vk = self.memory.device().pointers();
        let pointer = T::ref_from_ptr((self.pointer as usize + range.start) as *mut _,
@ -258,31 +275,49 @@ impl<D> MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
    }
}

unsafe impl<D> Send for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {}
unsafe impl<D> Sync for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {}

impl<D> Drop for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
impl AsRef<DeviceMemory> for MappedDeviceMemory {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let device = self.memory.device();
            let vk = device.pointers();
            vk.UnmapMemory(device.internal_object(), self.memory.memory);
        }
    fn as_ref(&self) -> &DeviceMemory {
        &self.memory
    }
}

impl AsMut<DeviceMemory> for MappedDeviceMemory {
    #[inline]
    fn as_mut(&mut self) -> &mut DeviceMemory {
        &mut self.memory
    }
}

unsafe impl DeviceOwned for MappedDeviceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.memory.device()
    }
}

unsafe impl Send for MappedDeviceMemory {}
unsafe impl Sync for MappedDeviceMemory {}

impl fmt::Debug for MappedDeviceMemory {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("MappedDeviceMemory")
            .field(&self.memory)
            .finish()
    }
}

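Editor's note: the old inherent `memory()` accessor gives way to the standard `AsRef`/`AsMut` conversions (resolving the TODO above), plus `DeviceOwned` on the wrapper itself. Call sites migrate roughly like this:

    // before: mapped.memory().size()
    let size = mapped.as_ref().size();
    // and the device is reachable without going through the inner memory:
    let dev = mapped.device();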
/// Object that can be used to read or write the content of a `MappedDeviceMemory`.
///
/// This object derefs to the content, just like a `MutexGuard` for example.
pub struct CpuAccess<'a, T: ?Sized + 'a, D = Arc<Device>> where D: SafeDeref<Target = Device> + 'a {
pub struct CpuAccess<'a, T: ?Sized + 'a> {
    pointer: *mut T,
    mem: &'a MappedDeviceMemory<D>,
    mem: &'a MappedDeviceMemory,
    coherent: bool,
    range: Range<usize>,
}

impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
    /// Builds a new `CpuAccess` to access a sub-part of the current `CpuAccess`.
    ///
    /// This function is unstable. Don't use it directly.
@ -290,7 +325,7 @@ impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: SafeDeref<Target =
    // TODO: decide what to do with this
    #[doc(hidden)]
    #[inline]
    pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U, D>
    pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U>
        where F: FnOnce(*mut T) -> *mut U
    {
        CpuAccess {
@ -302,12 +337,10 @@ impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: SafeDeref<Target =
    }
}

unsafe impl<'a, T: ?Sized + 'a, D: 'a> Send for CpuAccess<'a, T, D>
    where D: SafeDeref<Target = Device> {}
unsafe impl<'a, T: ?Sized + 'a, D: 'a> Sync for CpuAccess<'a, T, D>
    where D: SafeDeref<Target = Device> {}
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {}

impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
    type Target = T;

    #[inline]
@ -316,33 +349,31 @@ impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: SafeDeref
    }
}

impl<'a, T: ?Sized + 'a, D: 'a> DerefMut for CpuAccess<'a, T, D>
    where D: SafeDeref<Target = Device>
{
impl<'a, T: ?Sized + 'a> DerefMut for CpuAccess<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.pointer }
    }
}

impl<'a, T: ?Sized + 'a, D: 'a> Drop for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {
    #[inline]
    fn drop(&mut self) {
        // If the memory doesn't have the `coherent` flag, we need to flush the data.
        if !self.coherent {
            let vk = self.mem.memory().device().pointers();
            let vk = self.mem.as_ref().device().pointers();

            let range = vk::MappedMemoryRange {
                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                pNext: ptr::null(),
                memory: self.mem.memory().internal_object(),
                memory: self.mem.as_ref().internal_object(),
                offset: self.range.start as u64,
                size: (self.range.end - self.range.start) as u64,
            };

            // TODO: check result?
            unsafe {
                vk.FlushMappedMemoryRanges(self.mem.memory().device().internal_object(),
                vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(),
                                           1, &range);
            }
        }
@ -358,7 +389,7 @@ mod tests {
    fn create() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        let _ = DeviceMemory::alloc(&device, mem_ty, 256).unwrap();
        let _ = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
    }

    #[test]
@ -366,7 +397,7 @@ mod tests {
    fn zero_size() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        let _ = DeviceMemory::alloc(&device, mem_ty, 0);
        let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0);
    }

    #[test]
@ -376,7 +407,7 @@ mod tests {
        let mem_ty = device.physical_device().memory_types().filter(|m| !m.is_lazily_allocated())
                                                            .next().unwrap();

        match DeviceMemory::alloc(&device, mem_ty, 0xffffffffffffffff) {
        match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
            Err(OomError::OutOfDeviceMemory) => (),
            _ => panic!()
        }
@ -392,7 +423,7 @@ mod tests {
        let mut allocs = Vec::new();

        for _ in 0 .. 4 {
            match DeviceMemory::alloc(&device, mem_ty, heap_size / 3) {
            match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
                Err(OomError::OutOfDeviceMemory) => return, // test succeeded
                Ok(a) => allocs.push(a),
                _ => ()

@ -71,7 +71,7 @@
//! // Taking the first memory type for the sake of this example.
//! let ty = device.physical_device().memory_types().next().unwrap();
//!
//! let alloc = DeviceMemory::alloc(&device, ty, 1024).expect("Failed to allocate memory");
//! let alloc = DeviceMemory::alloc(device.clone(), ty, 1024).expect("Failed to allocate memory");
//!
//! // The memory is automatically free'd when `alloc` is destroyed.
//! ```

@ -88,7 +88,7 @@ impl StdHostVisibleMemoryTypePool {

        // Try append at the end.
        let last_end = entries.last().map(|e| align(e.end, alignment)).unwrap_or(0);
        if last_end + size <= dev_mem.memory().size() {
        if last_end + size <= (**dev_mem).as_ref().size() {
            entries.push(last_end .. last_end + size);
            return Ok(StdHostVisibleMemoryTypePoolAlloc {
                pool: me.clone(),
@ -103,7 +103,7 @@ impl StdHostVisibleMemoryTypePool {
        let new_block = {
            const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
            let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
            let new_block = try!(DeviceMemory::alloc_and_map(&me.device, me.memory_type(), to_alloc));
            let new_block = try!(DeviceMemory::alloc_and_map(me.device.clone(), me.memory_type(), to_alloc));
            Arc::new(new_block)
        };

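Editor's note: the block-sizing policy is untouched here; only the allocation call changes to clone the `Arc<Device>`. For reference, the rule `max(MIN_BLOCK_SIZE, size.next_power_of_two())` behaves like this:

    const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MiB
    // a 5 MB request still gets an 8 MiB block ...
    assert_eq!(cmp::max(MIN_BLOCK_SIZE, 5_000_000usize.next_power_of_two()), 8_388_608);
    // ... while a 12 MB request rounds up to the next power of two, 16 MiB
    assert_eq!(cmp::max(MIN_BLOCK_SIZE, 12_000_000usize.next_power_of_two()), 16_777_216);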
@ -24,8 +24,7 @@ mod non_host_visible;
mod pool;

/// Pool of GPU-visible memory that can be allocated from.
// TODO: remove 'static + Send + Sync
pub unsafe trait MemoryPool: 'static + Send + Sync {
pub unsafe trait MemoryPool {
    /// Object that represents a single allocation. Its destructor should free the chunk.
    type Alloc: MemoryPoolAlloc;

@ -49,8 +48,7 @@ pub unsafe trait MemoryPool: 'static + Send + Sync {
}

/// Object that represents a single allocation. Its destructor should free the chunk.
// TODO: remove 'static + Send + Sync
pub unsafe trait MemoryPoolAlloc: 'static + Send + Sync {
pub unsafe trait MemoryPoolAlloc {
    /// Returns the memory object from which this is allocated. Returns `None` if the memory is
    /// not mapped.
    fn mapped_memory(&self) -> Option<&MappedDeviceMemory>;

@ -102,7 +102,7 @@ impl StdNonHostVisibleMemoryTypePool {
        let new_block = {
            const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
            let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
            let new_block = try!(DeviceMemory::alloc(&me.device, me.memory_type(), to_alloc));
            let new_block = try!(DeviceMemory::alloc(me.device.clone(), me.memory_type(), to_alloc));
            Arc::new(new_block)
        };

@ -122,7 +122,7 @@ unsafe impl MemoryPoolAlloc for StdMemoryPoolAlloc {
    fn memory(&self) -> &DeviceMemory {
        match self.inner {
            StdMemoryPoolAllocInner::NonHostVisible(ref mem) => mem.memory(),
            StdMemoryPoolAllocInner::HostVisible(ref mem) => mem.memory().memory(),
            StdMemoryPoolAllocInner::HostVisible(ref mem) => mem.memory().as_ref(),
        }
    }

@ -14,12 +14,15 @@ use std::mem;
use std::ptr;
use std::sync::Arc;

use descriptor::PipelineLayoutAbstract;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutSys;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use descriptor::pipeline_layout::PipelineLayoutSuperset;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutNotSupersetError;
use pipeline::shader::ComputeShaderEntryPoint;
use pipeline::shader::SpecializationConstants;
@ -114,6 +117,13 @@ impl ComputePipeline<()> {
    }
}

impl<Pl> fmt::Debug for ComputePipeline<Pl> {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "<Vulkan compute pipeline {:?}>", self.inner.pipeline)
    }
}

impl<Pl> ComputePipeline<Pl> {
    /// Returns the `Device` this compute pipeline was created with.
    #[inline]
@ -173,13 +183,42 @@ unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl> where Pl: Pipelin
    }

    #[inline]
    fn desc(&self) -> &PipelineLayoutDescNames {
        self.layout().desc()
    fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>> {
        self.layout().descriptor_set_layout(index)
    }
}

unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl> where Pl: PipelineLayoutDesc {
    #[inline]
    fn num_sets(&self) -> usize {
        self.pipeline_layout.num_sets()
    }

    #[inline]
    fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>> {
        self.layout().descriptor_set_layout(index)
    fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
        self.pipeline_layout.num_bindings_in_set(set)
    }

    #[inline]
    fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
        self.pipeline_layout.descriptor(set, binding)
    }

    #[inline]
    fn num_push_constants_ranges(&self) -> usize {
        self.pipeline_layout.num_push_constants_ranges()
    }

    #[inline]
    fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
        self.pipeline_layout.push_constants_range(num)
    }
}

unsafe impl<Pl> PipelineLayoutDescNames for ComputePipeline<Pl> where Pl: PipelineLayoutDescNames {
    #[inline]
    fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
        self.pipeline_layout.descriptor_by_name(name)
    }
}
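Editor's note: `ComputePipeline` now implements `PipelineLayoutDesc` (and still `PipelineLayoutDescNames`) by forwarding every method to its inner `pipeline_layout`, so layout metadata is queried on the pipeline directly instead of through a separate `desc()` object. A hedged usage sketch, assuming `pipeline` is an already-built `ComputePipeline` (the binding name is made up for illustration):

    let sets = pipeline.num_sets();                      // forwarded to the pipeline layout
    let desc = pipeline.descriptor(0, 0);                // Option<DescriptorDesc>
    let named = pipeline.descriptor_by_name("u_buffer"); // hypothetical name -> Option<(set, binding)>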

@ -21,11 +21,13 @@ use buffer::BufferInner;
use device::Device;
use device::DeviceOwned;
use descriptor::PipelineLayoutAbstract;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use descriptor::pipeline_layout::PipelineLayoutSuperset;
use descriptor::pipeline_layout::PipelineLayoutNotSupersetError;
use descriptor::pipeline_layout::PipelineLayoutSys;
@ -404,18 +406,18 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>

        // Checking that the pipeline layout matches the shader stages.
        // TODO: more details in the errors
        PipelineLayoutSuperset::ensure_superset_of(pipeline_layout.desc(),
        PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
                                                   params.vertex_shader.layout())?;
        PipelineLayoutSuperset::ensure_superset_of(pipeline_layout.desc(),
        PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
                                                   params.fragment_shader.layout())?;
        if let Some(ref geometry_shader) = params.geometry_shader {
            PipelineLayoutSuperset::ensure_superset_of(pipeline_layout.desc(),
            PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
                                                       geometry_shader.layout())?;
        }
        if let Some(ref tess) = params.tessellation {
            PipelineLayoutSuperset::ensure_superset_of(pipeline_layout.desc(),
            PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
                                                       tess.tessellation_control_shader.layout())?;
            PipelineLayoutSuperset::ensure_superset_of(pipeline_layout.desc(),
            PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
                                                       tess.tessellation_evaluation_shader.layout())?;
        }

@ -1076,17 +1078,46 @@ unsafe impl<Mv, L, Rp> PipelineLayoutAbstract for GraphicsPipeline<Mv, L, Rp>
{
    #[inline]
    fn sys(&self) -> PipelineLayoutSys {
        self.layout().sys()
    }

    #[inline]
    fn desc(&self) -> &PipelineLayoutDescNames {
        self.layout().desc()
        self.layout.sys()
    }

    #[inline]
    fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>> {
        self.layout().descriptor_set_layout(index)
        self.layout.descriptor_set_layout(index)
    }
}

unsafe impl<Mv, L, Rp> PipelineLayoutDesc for GraphicsPipeline<Mv, L, Rp> where L: PipelineLayoutDesc {
    #[inline]
    fn num_sets(&self) -> usize {
        self.layout.num_sets()
    }

    #[inline]
    fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
        self.layout.num_bindings_in_set(set)
    }

    #[inline]
    fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
        self.layout.descriptor(set, binding)
    }

    #[inline]
    fn num_push_constants_ranges(&self) -> usize {
        self.layout.num_push_constants_ranges()
    }

    #[inline]
    fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
        self.layout.push_constants_range(num)
    }
}

unsafe impl<Mv, L, Rp> PipelineLayoutDescNames for GraphicsPipeline<Mv, L, Rp> where L: PipelineLayoutDescNames {
    #[inline]
    fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
        self.layout.descriptor_by_name(name)
    }
}

@ -1097,6 +1128,13 @@ unsafe impl<Mv, L, Rp> DeviceOwned for GraphicsPipeline<Mv, L, Rp> {
    }
}

impl<Mv, L, Rp> fmt::Debug for GraphicsPipeline<Mv, L, Rp> {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "<Vulkan graphics pipeline {:?}>", self.inner.pipeline)
    }
}

unsafe impl<Mv, L, Rp> RenderPassAbstract for GraphicsPipeline<Mv, L, Rp>
    where Rp: RenderPassAbstract
{
@ -421,6 +421,13 @@ unsafe impl VulkanObject for Sampler {
    }
}

impl fmt::Debug for Sampler {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "<Vulkan sampler {:?}>", self.sampler)
    }
}

impl Drop for Sampler {
    #[inline]
    fn drop(&mut self) {

@ -35,7 +35,6 @@ use vk;
/// Represents a surface on the screen.
///
/// Creating a `Surface` is platform-specific.
#[derive(Debug)]
pub struct Surface {
    instance: Arc<Instance>,
    surface: vk::SurfaceKHR,
@ -487,6 +486,13 @@ unsafe impl VulkanObject for Surface {
    }
}

impl fmt::Debug for Surface {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "<Vulkan surface {:?}>", self.surface)
    }
}

impl Drop for Surface {
    #[inline]
    fn drop(&mut self) {

@ -272,6 +272,7 @@ impl Swapchain {
    // TODO: has to make sure vkQueuePresent is called, because calling acquire_next_image many
    // times in a row is an error
    // TODO: swapchain must not have been replaced by being passed as the VkSwapchainCreateInfoKHR::oldSwapchain value to vkCreateSwapchainKHR
    // TODO: change timeout to `Option<Duration>`.
    pub fn acquire_next_image(&self, timeout: Duration) -> Result<(usize, SwapchainAcquireFuture), AcquireError> {
        unsafe {
            let stale = self.stale.lock().unwrap();
@ -416,6 +417,13 @@ unsafe impl VulkanObject for Swapchain {
    }
}

impl fmt::Debug for Swapchain {
    #[inline]
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "<Vulkan swapchain {:?}>", self.swapchain)
    }
}

impl Drop for Swapchain {
    #[inline]
    fn drop(&mut self) {

@ -100,16 +100,21 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
        }
    }

    /// Waits until the fence is signaled, or at least until the number of nanoseconds of the
    /// timeout has elapsed.
    /// Waits until the fence is signaled, or at least until the timeout duration has elapsed.
    ///
    /// Returns `Ok` if the fence is now signaled. Returns `Err` if the timeout was reached instead.
    pub fn wait(&self, timeout: Duration) -> Result<(), FenceWaitError> {
    ///
    /// If you pass a duration of 0, then the function will return without blocking.
    pub fn wait(&self, timeout: Option<Duration>) -> Result<(), FenceWaitError> {
        unsafe {
            if self.signaled.load(Ordering::Relaxed) { return Ok(()); }

            let timeout_ns = timeout.as_secs().saturating_mul(1_000_000_000)
                                    .saturating_add(timeout.subsec_nanos() as u64);
            let timeout_ns = if let Some(timeout) = timeout {
                timeout.as_secs().saturating_mul(1_000_000_000)
                       .saturating_add(timeout.subsec_nanos() as u64)
            } else {
                u64::max_value()
            };

            let vk = self.device.pointers();
            let r = try!(check_errors(vk.WaitForFences(self.device.internal_object(), 1,
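Editor's note: `wait` (and `multi_wait` below) now take an `Option<Duration>`; `None` is converted to `u64::max_value()` nanoseconds, i.e. wait indefinitely, which removes the need for the arbitrary long timeouts used elsewhere. A minimal sketch of the three modes:

    fence.wait(None).unwrap();                            // block until the fence is signaled
    fence.wait(Some(Duration::from_millis(5))).unwrap();  // give up after 5 ms
    let _ = fence.wait(Some(Duration::new(0, 0)));        // poll: Err if not yet signaled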
@ -133,7 +138,7 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
    /// # Panic
    ///
    /// Panics if not all fences belong to the same device.
    pub fn multi_wait<'a, I>(iter: I, timeout: Duration) -> Result<(), FenceWaitError>
    pub fn multi_wait<'a, I>(iter: I, timeout: Option<Duration>) -> Result<(), FenceWaitError>
        where I: IntoIterator<Item = &'a Fence<D>>, D: 'a
    {
        let mut device: Option<&Device> = None;
@ -153,8 +158,12 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
            }
        }).collect();

        let timeout_ns = timeout.as_secs().saturating_mul(1_000_000_000)
                                .saturating_add(timeout.subsec_nanos() as u64);
        let timeout_ns = if let Some(timeout) = timeout {
            timeout.as_secs().saturating_mul(1_000_000_000)
                   .saturating_add(timeout.subsec_nanos() as u64)
        } else {
            u64::max_value()
        };

        let r = if let Some(device) = device {
            unsafe {
@ -319,7 +328,7 @@ mod tests {
        let (device, _) = gfx_dev_and_queue!();

        let fence = Fence::signaled(device.clone()).unwrap();
        fence.wait(Duration::new(0, 10)).unwrap();
        fence.wait(Some(Duration::new(0, 10))).unwrap();
    }

    #[test]
@ -340,7 +349,7 @@ mod tests {
        let fence1 = Fence::signaled(device1.clone()).unwrap();
        let fence2 = Fence::signaled(device2.clone()).unwrap();

        let _ = Fence::multi_wait([&fence1, &fence2].iter().cloned(), Duration::new(0, 10));
        let _ = Fence::multi_wait([&fence1, &fence2].iter().cloned(), Some(Duration::new(0, 10)));
    }

    #[test]

@ -53,7 +53,7 @@ pub enum FenceSignalFutureBehavior {
    /// Wait for the fence to be signalled before submitting any further operation.
    Block {
        /// How long to block the current thread.
        timeout: Duration
        timeout: Option<Duration>
    },
}

@ -100,7 +100,7 @@ impl<F> FenceSignalFuture<F> where F: GpuFuture {

        match *state {
            FenceSignalFutureState::Flushed(_, ref fence) => {
                match fence.wait(Duration::from_secs(0)) {
                match fence.wait(Some(Duration::from_secs(0))) {
                    Ok(()) => (),
                    Err(_) => return,
                }
@ -333,9 +333,8 @@ impl<F> Drop for FenceSignalFuture<F> where F: GpuFuture {
        match mem::replace(&mut *state, FenceSignalFutureState::Cleaned) {
            FenceSignalFutureState::Flushed(previous, fence) => {
                // This is a normal situation. Submitting worked.
                // TODO: arbitrary timeout?
                // TODO: handle errors?
                fence.wait(Duration::from_secs(600)).unwrap();
                fence.wait(None).unwrap();
                unsafe { previous.signal_finished(); }
            },
            FenceSignalFutureState::Cleaned => {

@ -10,7 +10,6 @@
use std::error;
use std::fmt;
use std::sync::Arc;
use std::time::Duration;

use buffer::BufferAccess;
use command_buffer::CommandBuffer;
@ -197,7 +196,7 @@ pub unsafe trait GpuFuture: DeviceOwned {
    #[inline]
    fn then_signal_fence(self) -> FenceSignalFuture<Self> where Self: Sized {
        fence_signal::then_signal_fence(self, FenceSignalFutureBehavior::Block {
            timeout: Duration::from_millis(600) // TODO: arbitrary duration
            timeout: None
        })
    }
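Editor's note: the default created by `then_signal_fence` changes from an arbitrary 600 ms block to `timeout: None`, so waiting on the future now runs until the fence is actually signaled instead of for a bounded interval. Call sites are unchanged:

    // `previous` stands for any value implementing GpuFuture (hypothetical variable).
    let future = previous.then_signal_fence();
    // internally, dropping or flushing this future now ends in `fence.wait(None)`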