Run rustfmt on the code (#807)

* Run rustfmt on the code

* Fix compilation
tomaka, 2017-09-06 21:35:06 +02:00 (committed by GitHub)
parent 5ac98f53f1
commit 9662f8b092
74 changed files with 2559 additions and 1546 deletions
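
The hunks below are almost entirely mechanical reformatting: rustfmt rewraps long argument lists one-per-line under the opening parenthesis, breaks long method chains one adapter per line, and normalizes trailing commas. As a small runnable sketch of that chain layout (a toy example, not code from this commit), chains such as the `descriptors.iter().filter(..).fold(..)` one in the first hunk end up shaped like this:

    fn main() {
        // One iterator adapter per line once the chain no longer fits on a
        // single line; this is the layout rustfmt applies throughout the diff.
        let max_binding = [3u32, 1, 7, 2]
            .iter()
            .filter(|&&b| b != 1)
            .fold(0, |acc, &b| acc.max(1 + b));
        println!("{}", max_binding); // prints 8
    }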


@@ -124,8 +124,7 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
let num_bindings_in_set_body = {
(0 .. num_sets)
.map(|set| {
let num =
descriptors
let num = descriptors
.iter()
.filter(|d| d.set == set)
.fold(0, |s, d| cmp::max(s, 1 + d.binding));


@@ -62,10 +62,10 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -
let (ty, f_call) = {
if let enums::ExecutionModel::ExecutionModelGLCompute = *execution {
(format!("::vulkano::pipeline::shader::ComputeEntryPoint<{}, Layout>", spec_consts_struct),
format!("compute_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as \
*const _), Layout(ShaderStages {{ compute: true, .. ShaderStages::none() \
}}))"))
(format!("::vulkano::pipeline::shader::ComputeEntryPoint<{}, Layout>",
spec_consts_struct),
format!("compute_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), \
Layout(ShaderStages {{ compute: true, .. ShaderStages::none() }}))"))
} else {
let ty = match *execution {
@@ -74,36 +74,50 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -
},
enums::ExecutionModel::ExecutionModelTessellationControl => {
"::vulkano::pipeline::shader::GraphicsShaderType::TessellationControl".to_owned()
"::vulkano::pipeline::shader::GraphicsShaderType::TessellationControl"
.to_owned()
},
enums::ExecutionModel::ExecutionModelTessellationEvaluation => {
"::vulkano::pipeline::shader::GraphicsShaderType::TessellationEvaluation".to_owned()
"::vulkano::pipeline::shader::GraphicsShaderType::TessellationEvaluation"
.to_owned()
},
enums::ExecutionModel::ExecutionModelGeometry => {
let mut execution_mode = None;
for instruction in doc.instructions.iter() {
if let &parse::Instruction::ExecutionMode { target_id, ref mode, .. } = instruction {
if let &parse::Instruction::ExecutionMode {
target_id,
ref mode,
..
} = instruction
{
if target_id != id {
continue;
}
execution_mode = match mode {
&enums::ExecutionMode::ExecutionModeInputPoints => Some("Points"),
&enums::ExecutionMode::ExecutionModeInputLines => Some("Lines"),
&enums::ExecutionMode::ExecutionModeInputLinesAdjacency => Some("LinesWithAdjacency"),
&enums::ExecutionMode::ExecutionModeInputLinesAdjacency =>
Some("LinesWithAdjacency"),
&enums::ExecutionMode::ExecutionModeTriangles => Some("Triangles"),
&enums::ExecutionMode::ExecutionModeInputTrianglesAdjacency => Some("TrianglesWithAdjacency"),
&enums::ExecutionMode::ExecutionModeInputTrianglesAdjacency =>
Some("TrianglesWithAdjacency"),
_ => continue,
};
break;
}
}
format!("::vulkano::pipeline::shader::GraphicsShaderType::Geometry(
format!(
"::vulkano::pipeline::shader::GraphicsShaderType::Geometry(
\
::vulkano::pipeline::shader::GeometryShaderExecutionMode::{0}
)", execution_mode.unwrap())
\
)",
execution_mode.unwrap()
)
},
enums::ExecutionModel::ExecutionModelFragment => {
@@ -143,7 +157,9 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -
capitalized_ep_name);
let f = format!("graphics_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() \
as *const _), {0}Input, {0}Output, Layout({2}), {1})",
capitalized_ep_name, ty, stage);
capitalized_ep_name,
ty,
stage);
(t, f)
}


@@ -166,14 +166,8 @@ pub enum Instruction {
result_id: u32,
data: Vec<u32>,
},
SpecConstantTrue {
result_type_id: u32,
result_id: u32,
},
SpecConstantFalse {
result_type_id: u32,
result_id: u32,
},
SpecConstantTrue { result_type_id: u32, result_id: u32 },
SpecConstantFalse { result_type_id: u32, result_id: u32 },
SpecConstant {
result_type_id: u32,
result_id: u32,
@@ -256,7 +250,7 @@ fn decode_instruction(opcode: u16, operands: &[u32]) -> Result<Instruction, Pars
mode: ExecutionMode::from_num(operands[1])?,
optional_literals: operands[2 ..].to_vec(),
}
}
},
17 => Instruction::Capability(Capability::from_num(operands[0])?),
19 => Instruction::TypeVoid { result_id: operands[0] },
20 => Instruction::TypeBool { result_id: operands[0] },


@@ -43,19 +43,39 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {
for instruction in doc.instructions.iter() {
let (type_id, result_id, default_value) = match instruction {
&parse::Instruction::SpecConstantTrue { result_type_id, result_id } => {
&parse::Instruction::SpecConstantTrue {
result_type_id,
result_id,
} => {
(result_type_id, result_id, "1u32".to_string())
},
&parse::Instruction::SpecConstantFalse { result_type_id, result_id } => {
&parse::Instruction::SpecConstantFalse {
result_type_id,
result_id,
} => {
(result_type_id, result_id, "0u32".to_string())
},
&parse::Instruction::SpecConstant { result_type_id, result_id, ref data } => {
let data = data.iter().map(|d| d.to_string() + "u32").collect::<Vec<_>>().join(", ");
&parse::Instruction::SpecConstant {
result_type_id,
result_id,
ref data,
} => {
let data = data.iter()
.map(|d| d.to_string() + "u32")
.collect::<Vec<_>>()
.join(", ");
let def_val = format!("unsafe {{ ::std::mem::transmute([{}]) }}", data);
(result_type_id, result_id, def_val)
},
&parse::Instruction::SpecConstantComposite { result_type_id, result_id, ref data } => {
let data = data.iter().map(|d| d.to_string() + "u32").collect::<Vec<_>>().join(", ");
&parse::Instruction::SpecConstantComposite {
result_type_id,
result_id,
ref data,
} => {
let data = data.iter()
.map(|d| d.to_string() + "u32")
.collect::<Vec<_>>()
.join(", ");
let def_val = format!("unsafe {{ ::std::mem::transmute([{}]) }}", data);
(result_type_id, result_id, def_val)
},
@@ -65,17 +85,20 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {
let (rust_ty, rust_size, rust_alignment) = spec_const_type_from_id(doc, type_id);
let rust_size = rust_size.expect("Found runtime-sized specialization constant");
let constant_id = doc.instructions.iter().filter_map(|i| {
match i {
&parse::Instruction::Decorate
{ target_id, decoration: enums::Decoration::DecorationSpecId, ref params }
if target_id == result_id =>
{
let constant_id = doc.instructions
.iter()
.filter_map(|i| match i {
&parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationSpecId,
ref params,
} if target_id == result_id => {
Some(params[0])
},
_ => None,
}
}).next().expect("Found a specialization constant with no SpecId decoration");
})
.next()
.expect("Found a specialization constant with no SpecId decoration");
spec_consts.push(SpecConst {
name: ::name_from_id(doc, result_id),
@@ -91,11 +114,18 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {
let mut map_entries = Vec::new();
let mut curr_offset = 0;
for c in &spec_consts {
map_entries.push(format!("SpecializationMapEntry {{
constant_id: {},
map_entries.push(format!(
"SpecializationMapEntry {{
constant_id: \
{},
offset: {},
size: {},
}}", c.constant_id, curr_offset, c.rust_size));
\
}}",
c.constant_id,
curr_offset,
c.rust_size
));
assert_ne!(c.rust_size, 0);
curr_offset += c.rust_size;
@@ -104,7 +134,8 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {
map_entries
};
format!(r#"
format!(
r#"
#[derive(Debug, Copy, Clone)]
#[allow(non_snake_case)]
@@ -131,10 +162,16 @@ unsafe impl SpecConstsTrait for SpecializationConstants {{
}}
"#,
struct_def = spec_consts.iter().map(|c| format!("pub {}: {}", c.name, c.rust_ty))
.collect::<Vec<_>>().join(", "),
def_vals = spec_consts.iter().map(|c| format!("{}: {}", c.name, c.default_value))
.collect::<Vec<_>>().join(", "),
struct_def = spec_consts
.iter()
.map(|c| format!("pub {}: {}", c.name, c.rust_ty))
.collect::<Vec<_>>()
.join(", "),
def_vals = spec_consts
.iter()
.map(|c| format!("{}: {}", c.name, c.default_value))
.collect::<Vec<_>>()
.join(", "),
num_map_entries = map_entries.len(),
map_entries = map_entries.join(", ")
)
@@ -147,7 +184,7 @@ fn spec_const_type_from_id(doc: &parse::Spirv, searched: u32) -> (String, Option
&parse::Instruction::TypeBool { result_id } if result_id == searched => {
return ("u32".to_owned(), Some(mem::size_of::<u32>()), mem::align_of::<u32>());
},
_ => ()
_ => (),
}
}


@@ -195,9 +195,8 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
// We can only implement Clone if there's no unsized member in the struct.
let (impl_text, derive_text) = if current_rust_offset.is_some() {
let i =
format!("\nimpl Clone for {name} {{\n fn clone(&self) -> Self {{\n {name} \
{{\n{copies}\n }}\n }}\n}}\n",
let i = format!("\nimpl Clone for {name} {{\n fn clone(&self) -> Self {{\n \
{name} {{\n{copies}\n }}\n }}\n}}\n",
name = name,
copies = rust_members
.iter()
@@ -209,10 +208,9 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
("".to_owned(), "")
};
let s = format!("#[repr(C)]\n\
{derive_text}\n\
#[allow(non_snake_case)]\n\
pub struct {name} {{\n{members}\n}} /* total_size: {t:?} */\n{impl_text}",
let s =
format!("#[repr(C)]\n{derive_text}\n#[allow(non_snake_case)]\npub struct {name} \
{{\n{members}\n}} /* total_size: {t:?} */\n{impl_text}",
name = name,
members = rust_members
.iter()


@@ -25,12 +25,12 @@ use std::mem;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ptr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::RwLockReadGuard;
use std::sync::RwLockWriteGuard;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
@@ -117,8 +117,7 @@ impl<T> CpuAccessibleBuffer<T> {
/// Builds a new uninitialized buffer. Only allowed for sized data.
#[inline]
pub unsafe fn uninitialized(device: Arc<Device>, usage: BufferUsage)
-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>
{
-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError> {
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, iter::empty())
}
}
@@ -129,12 +128,11 @@ impl<T> CpuAccessibleBuffer<[T]> {
pub fn from_iter<I>(device: Arc<Device>, usage: BufferUsage, data: I)
-> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError>
where I: ExactSizeIterator<Item = T>,
T: Content + 'static,
T: Content + 'static
{
unsafe {
let uninitialized = CpuAccessibleBuffer::uninitialized_array(device,
data.len(),
usage)?;
let uninitialized =
CpuAccessibleBuffer::uninitialized_array(device, data.len(), usage)?;
// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
@@ -154,9 +152,9 @@ impl<T> CpuAccessibleBuffer<[T]> {
/// Builds a new buffer. Can be used for arrays.
#[inline]
pub unsafe fn uninitialized_array(device: Arc<Device>, len: usize, usage: BufferUsage)
-> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError>
{
pub unsafe fn uninitialized_array(
device: Arc<Device>, len: usize, usage: BufferUsage)
-> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError> {
CpuAccessibleBuffer::raw(device, len * mem::size_of::<T>(), usage, iter::empty())
}
}
@@ -169,7 +167,8 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
/// You must ensure that the size that you pass is correct for `T`.
///
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: I) -> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>
queue_families: I)
-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families
@@ -206,7 +205,7 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
inner: buffer,
memory: mem,
access: RwLock::new(CurrentGpuAccess::NonExclusive {
num: AtomicUsize::new(0)
num: AtomicUsize::new(0),
}),
queue_families: queue_families,
marker: PhantomData,
@@ -233,7 +232,7 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> {
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>
where T: Content + 'static,
A: MemoryPoolAlloc,
A: MemoryPoolAlloc
{
/// Locks the buffer in order to read its content from the CPU.
///
@@ -569,8 +568,6 @@ mod tests {
const EMPTY: [i32; 0] = [];
let _ = CpuAccessibleBuffer::from_data(device,
BufferUsage::all(),
EMPTY.iter());
let _ = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), EMPTY.iter());
}
}


@@ -29,14 +29,14 @@ use device::Device;
use device::DeviceOwned;
use device::Queue;
use memory::DedicatedAlloc;
use memory::pool::AllocLayout;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPool;
use memory::DeviceMemoryAllocError;
use sync::AccessError;
use sync::Sharing;
@@ -286,9 +286,7 @@ impl<T, A> CpuBufferPool<T, A>
/// > large enough, a new chunk of memory is automatically allocated.
#[inline]
pub fn next(&self, data: T) -> CpuBufferPoolSubbuffer<T, A> {
CpuBufferPoolSubbuffer {
chunk: self.chunk(iter::once(data))
}
CpuBufferPoolSubbuffer { chunk: self.chunk(iter::once(data)) }
}
/// Grants access to a new subbuffer and puts `data` in it.
@@ -317,7 +315,8 @@ impl<T, A> CpuBufferPool<T, A>
};
// TODO: choose the capacity better?
let next_capacity = cmp::max(data.len(), 1) * match *mutex {
let next_capacity = cmp::max(data.len(), 1) *
match *mutex {
Some(ref b) => b.capacity * 2,
None => 3,
};
@@ -339,9 +338,9 @@ impl<T, A> CpuBufferPool<T, A>
#[inline]
pub fn try_next(&self, data: T) -> Option<CpuBufferPoolSubbuffer<T, A>> {
let mut mutex = self.current_buffer.lock().unwrap();
self.try_next_impl(&mut mutex, iter::once(data)).map(|c| {
CpuBufferPoolSubbuffer { chunk: c }
}).ok()
self.try_next_impl(&mut mutex, iter::once(data))
.map(|c| CpuBufferPoolSubbuffer { chunk: c })
.ok()
}
// Creates a new buffer and sets it as current. The capacity is in number of elements.
@@ -354,7 +353,8 @@ impl<T, A> CpuBufferPool<T, A>
let (buffer, mem_reqs) = {
let size_bytes = match mem::size_of::<T>().checked_mul(capacity) {
Some(s) => s,
None => return Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)),
None =>
return Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)),
};
match UnsafeBuffer::new(self.device.clone(),
@@ -379,8 +379,7 @@ impl<T, A> CpuBufferPool<T, A>
debug_assert!(mem.mapped_memory().is_some());
buffer.bind_memory(mem.memory(), mem.offset())?;
**cur_buf_mutex =
Some(Arc::new(ActualBuffer {
**cur_buf_mutex = Some(Arc::new(ActualBuffer {
inner: buffer,
memory: mem,
chunks_in_use: Mutex::new(vec![]),
@@ -403,7 +402,8 @@ impl<T, A> CpuBufferPool<T, A>
// Panics if the length of the iterator didn't match the actual number of elements.
//
fn try_next_impl<I>(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>,
mut data: I) -> Result<CpuBufferPoolChunk<T, A>, I>
mut data: I)
-> Result<CpuBufferPoolChunk<T, A>, I>
where I: ExactSizeIterator<Item = T>
{
// Grab the current buffer. Return `Err` if the pool wasn't "initialized" yet.
@@ -440,23 +440,30 @@ impl<T, A> CpuBufferPool<T, A>
// own a mutex lock to the buffer, it means that `next_index` can't be accessed
// concurrently.
// TODO: ^ eventually should be put inside the mutex
let idx = current_buffer
.next_index
.load(Ordering::SeqCst);
let idx = current_buffer.next_index.load(Ordering::SeqCst);
// Find the required alignment in bytes.
let align_bytes = cmp::max(
if self.usage.uniform_buffer {
self.device().physical_device().limits()
.min_uniform_buffer_offset_alignment() as usize
} else { 1 },
let align_bytes = cmp::max(if self.usage.uniform_buffer {
self.device()
.physical_device()
.limits()
.min_uniform_buffer_offset_alignment() as
usize
} else {
1
},
if self.usage.storage_buffer {
self.device().physical_device().limits()
.min_storage_buffer_offset_alignment() as usize
} else { 1 },
);
self.device()
.physical_device()
.limits()
.min_storage_buffer_offset_alignment() as
usize
} else {
1
});
let tentative_align_offset = (align_bytes - ((idx * mem::size_of::<T>()) % align_bytes)) % align_bytes;
let tentative_align_offset =
(align_bytes - ((idx * mem::size_of::<T>()) % align_bytes)) % align_bytes;
let additional_len = if tentative_align_offset == 0 {
0
} else {
@@ -468,8 +475,12 @@ impl<T, A> CpuBufferPool<T, A>
// Find out whether any chunk in use overlaps this range.
if tentative_index + tentative_len <= current_buffer.capacity &&
!chunks_in_use.iter().any(|c| (c.index >= tentative_index && c.index < tentative_index + tentative_len) ||
(c.index <= tentative_index && c.index + c.len > tentative_index))
!chunks_in_use.iter().any(|c| {
(c.index >= tentative_index &&
c.index < tentative_index + tentative_len) ||
(c.index <= tentative_index &&
c.index + c.len > tentative_index)
})
{
(tentative_index, tentative_len, tentative_align_offset)
} else {
@@ -501,13 +512,16 @@ impl<T, A> CpuBufferPool<T, A>
ptr::write(o, i);
written += 1;
}
assert_eq!(written, requested_len,
assert_eq!(written,
requested_len,
"Iterator passed to CpuBufferPool::chunk has a mismatch between reported \
length and actual number of elements");
}
// Mark the chunk as in use.
current_buffer.next_index.store(index + occupied_len, Ordering::SeqCst);
current_buffer
.next_index
.store(index + occupied_len, Ordering::SeqCst);
chunks_in_use.push(ActualBufferChunk {
index,
len: occupied_len,
@@ -557,10 +571,15 @@ impl<T, A> Clone for CpuBufferPoolChunk<T, A>
{
fn clone(&self) -> CpuBufferPoolChunk<T, A> {
let mut chunks_in_use_lock = self.buffer.chunks_in_use.lock().unwrap();
let chunk = chunks_in_use_lock.iter_mut().find(|c| c.index == self.index).unwrap();
let chunk = chunks_in_use_lock
.iter_mut()
.find(|c| c.index == self.index)
.unwrap();
debug_assert!(chunk.num_cpu_accesses >= 1);
chunk.num_cpu_accesses = chunk.num_cpu_accesses.checked_add(1)
chunk.num_cpu_accesses = chunk
.num_cpu_accesses
.checked_add(1)
.expect("Overflow in CPU accesses");
CpuBufferPoolChunk {
@@ -601,7 +620,10 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
}
let mut chunks_in_use_lock = self.buffer.chunks_in_use.lock().unwrap();
let chunk = chunks_in_use_lock.iter_mut().find(|c| c.index == self.index).unwrap();
let chunk = chunks_in_use_lock
.iter_mut()
.find(|c| c.index == self.index)
.unwrap();
if chunk.num_gpu_accesses != 0 {
return Err(AccessError::AlreadyInUse);
@@ -618,10 +640,15 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
}
let mut chunks_in_use_lock = self.buffer.chunks_in_use.lock().unwrap();
let chunk = chunks_in_use_lock.iter_mut().find(|c| c.index == self.index).unwrap();
let chunk = chunks_in_use_lock
.iter_mut()
.find(|c| c.index == self.index)
.unwrap();
debug_assert!(chunk.num_gpu_accesses >= 1);
chunk.num_gpu_accesses = chunk.num_gpu_accesses.checked_add(1)
chunk.num_gpu_accesses = chunk
.num_gpu_accesses
.checked_add(1)
.expect("Overflow in GPU usages");
}
@@ -632,7 +659,10 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
}
let mut chunks_in_use_lock = self.buffer.chunks_in_use.lock().unwrap();
let chunk = chunks_in_use_lock.iter_mut().find(|c| c.index == self.index).unwrap();
let chunk = chunks_in_use_lock
.iter_mut()
.find(|c| c.index == self.index)
.unwrap();
debug_assert!(chunk.num_gpu_accesses >= 1);
chunk.num_gpu_accesses -= 1;
@@ -649,7 +679,10 @@ impl<T, A> Drop for CpuBufferPoolChunk<T, A>
}
let mut chunks_in_use_lock = self.buffer.chunks_in_use.lock().unwrap();
let chunk_num = chunks_in_use_lock.iter_mut().position(|c| c.index == self.index).unwrap();
let chunk_num = chunks_in_use_lock
.iter_mut()
.position(|c| c.index == self.index)
.unwrap();
if chunks_in_use_lock[chunk_num].num_cpu_accesses >= 2 {
chunks_in_use_lock[chunk_num].num_cpu_accesses -= 1;
@@ -679,9 +712,7 @@ impl<T, A> Clone for CpuBufferPoolSubbuffer<T, A>
where A: MemoryPool
{
fn clone(&self) -> CpuBufferPoolSubbuffer<T, A> {
CpuBufferPoolSubbuffer {
chunk: self.chunk.clone(),
}
CpuBufferPoolSubbuffer { chunk: self.chunk.clone() }
}
}


@@ -31,14 +31,14 @@ use device::DeviceOwned;
use device::Queue;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::pool::AllocLayout;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use memory::DeviceMemoryAllocError;
use sync::AccessError;
use sync::Sharing;
@@ -222,7 +222,7 @@ unsafe impl<T: ?Sized, A> BufferAccess for DeviceLocalBuffer<T, A>
},
&mut GpuAccess::Exclusive { .. } => {
Err(AccessError::AlreadyInUse)
}
},
}
}
@@ -238,7 +238,7 @@ unsafe impl<T: ?Sized, A> BufferAccess for DeviceLocalBuffer<T, A>
GpuAccess::Exclusive { ref mut num } => {
debug_assert!(*num >= 1);
*num += 1;
}
},
}
}


@@ -41,6 +41,8 @@ use device::Device;
use device::DeviceOwned;
use device::Queue;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
@@ -48,8 +50,6 @@ use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use sync::AccessError;
use sync::NowFuture;
use sync::Sharing;
@@ -90,7 +90,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// be finished before submitting your own operation.
pub fn from_data(
data: T, usage: BufferUsage, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture),
DeviceMemoryAllocError>
where T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_data(queue.device().clone(),
@@ -107,7 +108,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// be finished before submitting your own operation.
pub fn from_buffer<B>(
source: B, usage: BufferUsage, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture),
DeviceMemoryAllocError>
where B: BufferAccess + TypedBufferAccess<Content = T> + 'static + Clone + Send + Sync,
T: 'static + Send + Sync
{
@@ -158,9 +160,11 @@ impl<T> ImmutableBuffer<T> {
#[inline]
pub unsafe fn uninitialized(
device: Arc<Device>, usage: BufferUsage)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
{
ImmutableBuffer::raw(device.clone(), mem::size_of::<T>(), usage,
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>),
DeviceMemoryAllocError> {
ImmutableBuffer::raw(device.clone(),
mem::size_of::<T>(),
usage,
device.active_queue_families())
}
}
@@ -168,7 +172,8 @@ impl<T> ImmutableBuffer<T> {
impl<T> ImmutableBuffer<[T]> {
pub fn from_iter<D>(
data: D, usage: BufferUsage, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture),
DeviceMemoryAllocError>
where D: ExactSizeIterator<Item = T>,
T: 'static + Send + Sync + Sized
{
@@ -195,12 +200,14 @@ impl<T> ImmutableBuffer<[T]> {
/// data, otherwise the content is undefined.
///
#[inline]
pub unsafe fn uninitialized_array(
device: Arc<Device>, len: usize, usage: BufferUsage)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferInitialization<[T]>), DeviceMemoryAllocError>
{
ImmutableBuffer::raw(device.clone(), len * mem::size_of::<T>(),
usage, device.active_queue_families())
pub unsafe fn uninitialized_array(device: Arc<Device>, len: usize, usage: BufferUsage)
-> Result<(Arc<ImmutableBuffer<[T]>>,
ImmutableBufferInitialization<[T]>),
DeviceMemoryAllocError> {
ImmutableBuffer::raw(device.clone(),
len * mem::size_of::<T>(),
usage,
device.active_queue_families())
}
}
@@ -223,7 +230,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
#[inline]
pub unsafe fn raw<'a, I>(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>),
DeviceMemoryAllocError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
@@ -234,7 +242,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
// inlined.
unsafe fn raw_impl(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: SmallVec<[u32; 4]>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError> {
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>),
DeviceMemoryAllocError> {
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
@@ -436,14 +445,10 @@ mod tests {
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_data(12u32,
BufferUsage::all(),
queue.clone())
let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone())
.unwrap();
let destination = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
0)
let destination = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0)
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
@@ -496,13 +501,13 @@ mod tests {
fn writing_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_data(12u32,
BufferUsage::all(),
queue.clone())
let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone())
.unwrap();
assert_should_panic!({ // TODO: check Result error instead of panicking
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
assert_should_panic!({
// TODO: check Result error instead of panicking
let _ = AutoCommandBufferBuilder::new(device.clone(),
queue.family())
.unwrap()
.fill_buffer(buffer, 50)
.unwrap()
@@ -520,18 +525,15 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all())
.unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
0)
.unwrap();
let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
assert_should_panic!({ // TODO: check Result error instead of panicking
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
assert_should_panic!({
// TODO: check Result error instead of panicking
let _ = AutoCommandBufferBuilder::new(device.clone(),
queue.family())
.unwrap()
.copy_buffer(source, buffer)
.unwrap()
@@ -549,15 +551,10 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all())
.unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
0)
.unwrap();
let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
@@ -579,15 +576,10 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all())
.unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
0)
.unwrap();
let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
@@ -615,8 +607,7 @@ mod tests {
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();
let _ =
ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone());
let _ = ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone());
}
// TODO: write tons of tests that try to exploit loopholes


@@ -146,7 +146,9 @@ impl UnsafeBuffer {
let mut output = vk::MemoryRequirements2KHR {
sType: vk::STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
pNext: output2.as_mut().map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
pNext: output2
.as_mut()
.map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
.unwrap_or(ptr::null_mut()) as *mut _,
memoryRequirements: mem::uninitialized(),
};
@@ -416,8 +418,10 @@ impl From<Error> for BufferCreationError {
#[inline]
fn from(err: Error) -> BufferCreationError {
match err {
err @ Error::OutOfHostMemory => BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
err @ Error::OutOfDeviceMemory => BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
err @ Error::OutOfHostMemory =>
BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
err @ Error::OutOfDeviceMemory =>
BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
_ => panic!("unexpected error: {:?}", err),
}
}
@@ -462,7 +466,8 @@ mod tests {
};
assert_should_panic!("Can't enable sparse residency without enabling sparse \
binding as well", {
binding as well",
{
let _ = unsafe {
UnsafeBuffer::new(device,
128,
@@ -483,7 +488,8 @@ mod tests {
};
assert_should_panic!("Can't enable sparse aliasing without enabling sparse \
binding as well", {
binding as well",
{
let _ = unsafe {
UnsafeBuffer::new(device,
128,


@@ -384,9 +384,8 @@ mod tests {
..BufferUsage::none()
};
let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0 .. 128).map(|_| 0),
usage,
queue.clone())
let (buffer, _) =
ImmutableBuffer::<[u32]>::from_iter((0 .. 128).map(|_| 0), usage, queue.clone())
.unwrap();
let view = BufferView::new(buffer, format::R32Uint).unwrap();


@@ -12,9 +12,9 @@ use std::fmt;
use std::iter;
use std::mem;
use std::slice;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use OomError;
use buffer::BufferAccess;
@@ -126,10 +126,12 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// destroyed. This makes it possible for the implementation to perform additional
/// optimizations.
#[inline]
pub fn primary_one_time_submit(device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
AutoCommandBufferBuilder::with_flags(device, queue_family, Kind::primary(),
pub fn primary_one_time_submit(
device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
AutoCommandBufferBuilder::with_flags(device,
queue_family,
Kind::primary(),
Flags::OneTimeSubmit)
}
@@ -138,10 +140,12 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Contrary to `primary`, the final command buffer can be executed multiple times in parallel
/// in multiple different queues.
#[inline]
pub fn primary_simultaneous_use(device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
AutoCommandBufferBuilder::with_flags(device, queue_family, Kind::primary(),
pub fn primary_simultaneous_use(
device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
AutoCommandBufferBuilder::with_flags(device,
queue_family,
Kind::primary(),
Flags::SimultaneousUse)
}
@@ -150,7 +154,8 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// The final command buffer can only be executed once at a time. In other words, it is as if
/// executing the command buffer modifies it.
#[inline]
pub fn secondary_compute(device: Arc<Device>, queue_family: QueueFamily)
pub fn secondary_compute(
device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
let kind = Kind::secondary(KindOcclusionQuery::Forbidden,
QueryPipelineStatisticFlags::none());
@@ -163,9 +168,9 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// being destroyed. This makes it possible for the implementation to perform additional
/// optimizations.
#[inline]
pub fn secondary_compute_one_time_submit(device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
pub fn secondary_compute_one_time_submit(
device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
let kind = Kind::secondary(KindOcclusionQuery::Forbidden,
QueryPipelineStatisticFlags::none());
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::OneTimeSubmit)
@@ -176,9 +181,9 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Contrary to `secondary_compute`, the final command buffer can be executed multiple times in
/// parallel in multiple different queues.
#[inline]
pub fn secondary_compute_simultaneous_use(device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
pub fn secondary_compute_simultaneous_use(
device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
let kind = Kind::secondary(KindOcclusionQuery::Forbidden,
QueryPipelineStatisticFlags::none());
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::SimultaneousUse)
@@ -186,8 +191,8 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Same as `secondary_compute`, but allows specifying how queries are being inherited.
#[inline]
pub fn secondary_compute_inherit_queries(device: Arc<Device>, queue_family: QueueFamily,
occlusion_query: KindOcclusionQuery,
pub fn secondary_compute_inherit_queries(
device: Arc<Device>, queue_family: QueueFamily, occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
let kind = Kind::secondary(occlusion_query, query_statistics_flags);
@@ -196,24 +201,20 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Same as `secondary_compute_one_time_submit`, but allows specifying how queries are being inherited.
#[inline]
pub fn secondary_compute_one_time_submit_inherit_queries(device: Arc<Device>,
queue_family: QueueFamily,
occlusion_query: KindOcclusionQuery,
pub fn secondary_compute_one_time_submit_inherit_queries(
device: Arc<Device>, queue_family: QueueFamily, occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
let kind = Kind::secondary(occlusion_query, query_statistics_flags);
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::OneTimeSubmit)
}
/// Same as `secondary_compute_simultaneous_use`, but allows specifying how queries are being inherited.
#[inline]
pub fn secondary_compute_simultaneous_use_inherit_queries(device: Arc<Device>,
queue_family: QueueFamily,
occlusion_query: KindOcclusionQuery,
pub fn secondary_compute_simultaneous_use_inherit_queries(
device: Arc<Device>, queue_family: QueueFamily, occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
let kind = Kind::secondary(occlusion_query, query_statistics_flags);
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::SimultaneousUse)
}
@@ -223,14 +224,17 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// The final command buffer can only be executed once at a time. In other words, it is as if
/// executing the command buffer modifies it.
#[inline]
pub fn secondary_graphics<R>(device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
pub fn secondary_graphics<R>(
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
where R: RenderPassAbstract + Clone + Send + Sync + 'static
{
let kind = Kind::Secondary {
render_pass: Some(KindSecondaryRenderPass {
subpass,
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
framebuffer:
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
()>>,
}),
occlusion_query: KindOcclusionQuery::Forbidden,
query_statistics_flags: QueryPipelineStatisticFlags::none(),
@@ -245,15 +249,17 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// before being destroyed. This makes it possible for the implementation to perform additional
/// optimizations.
#[inline]
pub fn secondary_graphics_one_time_submit<R>(device: Arc<Device>, queue_family: QueueFamily,
subpass: Subpass<R>)
pub fn secondary_graphics_one_time_submit<R>(
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
where R: RenderPassAbstract + Clone + Send + Sync + 'static
{
let kind = Kind::Secondary {
render_pass: Some(KindSecondaryRenderPass {
subpass,
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
framebuffer:
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
()>>,
}),
occlusion_query: KindOcclusionQuery::Forbidden,
query_statistics_flags: QueryPipelineStatisticFlags::none(),
@@ -267,15 +273,17 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Contrary to `secondary_graphics`, the final command buffer can be executed multiple times
/// in parallel in multiple different queues.
#[inline]
pub fn secondary_graphics_simultaneous_use<R>(device: Arc<Device>, queue_family: QueueFamily,
subpass: Subpass<R>)
pub fn secondary_graphics_simultaneous_use<R>(
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
where R: RenderPassAbstract + Clone + Send + Sync + 'static
{
let kind = Kind::Secondary {
render_pass: Some(KindSecondaryRenderPass {
subpass,
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
framebuffer:
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
()>>,
}),
occlusion_query: KindOcclusionQuery::Forbidden,
query_statistics_flags: QueryPipelineStatisticFlags::none(),
@@ -286,17 +294,18 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Same as `secondary_graphics`, but allows specifying how queries are being inherited.
#[inline]
pub fn secondary_graphics_inherit_queries<R>(device: Arc<Device>, queue_family: QueueFamily,
subpass: Subpass<R>,
occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
pub fn secondary_graphics_inherit_queries<R>(
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>,
occlusion_query: KindOcclusionQuery, query_statistics_flags: QueryPipelineStatisticFlags)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
where R: RenderPassAbstract + Clone + Send + Sync + 'static
{
let kind = Kind::Secondary {
render_pass: Some(KindSecondaryRenderPass {
subpass,
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
framebuffer:
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
()>>,
}),
occlusion_query,
query_statistics_flags,
@@ -307,17 +316,18 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Same as `secondary_graphics_one_time_submit`, but allows specifying how queries are being inherited.
#[inline]
pub fn secondary_graphics_one_time_submit_inherit_queries<R>(device: Arc<Device>,
queue_family: QueueFamily, subpass: Subpass<R>,
occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
pub fn secondary_graphics_one_time_submit_inherit_queries<R>(
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>,
occlusion_query: KindOcclusionQuery, query_statistics_flags: QueryPipelineStatisticFlags)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
where R: RenderPassAbstract + Clone + Send + Sync + 'static
{
let kind = Kind::Secondary {
render_pass: Some(KindSecondaryRenderPass {
subpass,
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
framebuffer:
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
()>>,
}),
occlusion_query,
query_statistics_flags,
@@ -328,17 +338,18 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
/// Same as `secondary_graphics_simultaneous_use`, but allows specifying how queries are being inherited.
#[inline]
pub fn secondary_graphics_simultaneous_use_inherit_queries<R>(device: Arc<Device>,
queue_family: QueueFamily, subpass: Subpass<R>,
occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
pub fn secondary_graphics_simultaneous_use_inherit_queries<R>(
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>,
occlusion_query: KindOcclusionQuery, query_statistics_flags: QueryPipelineStatisticFlags)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
where R: RenderPassAbstract + Clone + Send + Sync + 'static
{
let kind = Kind::Secondary {
render_pass: Some(KindSecondaryRenderPass {
subpass,
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
framebuffer:
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
()>>,
}),
occlusion_query,
query_statistics_flags,
@@ -398,8 +409,7 @@ impl<P> AutoCommandBufferBuilder<P> {
#[inline]
fn ensure_inside_render_pass_secondary(&self)
-> Result<(), AutoCommandBufferBuilderContextError>
{
-> Result<(), AutoCommandBufferBuilderContextError> {
if self.render_pass.is_some() {
if self.subpass_secondary {
Ok(())
@@ -489,8 +499,11 @@ impl<P> AutoCommandBufferBuilder<P> {
let clear_values = framebuffer.convert_clear_values(clear_values);
let clear_values = clear_values.collect::<Vec<_>>().into_iter(); // TODO: necessary for Send + Sync ; needs an API rework of convert_clear_values
let contents = if secondary { SubpassContents::SecondaryCommandBuffers }
else { SubpassContents::Inline };
let contents = if secondary {
SubpassContents::SecondaryCommandBuffers
} else {
SubpassContents::Inline
};
self.inner
.begin_render_pass(framebuffer.clone(), contents, clear_values)?;
self.render_pass = Some((Box::new(framebuffer) as Box<_>, 0));
@@ -530,12 +543,12 @@ impl<P> AutoCommandBufferBuilder<P> {
///
/// - Panics if the source or the destination was not created with `device`.
///
pub fn blit_image<S, D>(
mut self, source: S, source_top_left: [i32; 3], source_bottom_right: [i32; 3],
source_base_array_layer: u32, source_mip_level: u32, destination: D,
destination_top_left: [i32; 3], destination_bottom_right: [i32; 3],
destination_base_array_layer: u32, destination_mip_level: u32, layer_count: u32,
filter: Filter) -> Result<Self, BlitImageError>
pub fn blit_image<S, D>(mut self, source: S, source_top_left: [i32; 3],
source_bottom_right: [i32; 3], source_base_array_layer: u32,
source_mip_level: u32, destination: D, destination_top_left: [i32; 3],
destination_bottom_right: [i32; 3], destination_base_array_layer: u32,
destination_mip_level: u32, layer_count: u32, filter: Filter)
-> Result<Self, BlitImageError>
where S: ImageAccess + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static
{
@@ -546,10 +559,18 @@ impl<P> AutoCommandBufferBuilder<P> {
self.ensure_outside_render_pass()?;
check_blit_image(self.device(), &source, source_top_left, source_bottom_right,
source_base_array_layer, source_mip_level, &destination,
destination_top_left, destination_bottom_right,
destination_base_array_layer, destination_mip_level, layer_count,
check_blit_image(self.device(),
&source,
source_top_left,
source_bottom_right,
source_base_array_layer,
source_mip_level,
&destination,
destination_top_left,
destination_bottom_right,
destination_base_array_layer,
destination_mip_level,
layer_count,
filter)?;
let blit = UnsafeCommandBufferBuilderImageBlit {
@@ -574,8 +595,13 @@ impl<P> AutoCommandBufferBuilder<P> {
destination_bottom_right,
};
self.inner.blit_image(source, ImageLayout::TransferSrcOptimal, destination, // TODO: let choose layout
ImageLayout::TransferDstOptimal, iter::once(blit), filter)?;
self.inner
.blit_image(source,
ImageLayout::TransferSrcOptimal,
destination, // TODO: let choose layout
ImageLayout::TransferDstOptimal,
iter::once(blit),
filter)?;
Ok(self)
}
}
@@ -589,7 +615,7 @@ impl<P> AutoCommandBufferBuilder<P> {
///
pub fn clear_color_image<I>(self, image: I, color: ClearValue)
-> Result<Self, ClearColorImageError>
where I: ImageAccess + Send + Sync + 'static,
where I: ImageAccess + Send + Sync + 'static
{
let layers = image.dimensions().array_layers();
let levels = image.mipmap_levels();
@@ -606,7 +632,7 @@ impl<P> AutoCommandBufferBuilder<P> {
pub fn clear_color_image_dimensions<I>(mut self, image: I, first_layer: u32, num_layers: u32,
first_mipmap: u32, num_mipmaps: u32, color: ClearValue)
-> Result<Self, ClearColorImageError>
where I: ImageAccess + Send + Sync + 'static,
where I: ImageAccess + Send + Sync + 'static
{
unsafe {
if !self.graphics_allowed && !self.compute_allowed {
@@ -614,11 +640,17 @@ impl<P> AutoCommandBufferBuilder<P> {
}
self.ensure_outside_render_pass()?;
check_clear_color_image(self.device(), &image, first_layer, num_layers,
first_mipmap, num_mipmaps)?;
check_clear_color_image(self.device(),
&image,
first_layer,
num_layers,
first_mipmap,
num_mipmaps)?;
match color {
ClearValue::Float(_) | ClearValue::Int(_) | ClearValue::Uint(_) => {},
ClearValue::Float(_) |
ClearValue::Int(_) |
ClearValue::Uint(_) => {},
_ => panic!("The clear color is not a color value"),
};
@@ -630,7 +662,10 @@ impl<P> AutoCommandBufferBuilder<P> {
};
// TODO: let choose layout
self.inner.clear_color_image(image, ImageLayout::TransferDstOptimal, color,
self.inner
.clear_color_image(image,
ImageLayout::TransferDstOptimal,
color,
iter::once(region))?;
Ok(self)
}
@@ -641,15 +676,17 @@ impl<P> AutoCommandBufferBuilder<P> {
/// This command will copy from the source to the destination. If their size is not equal, then
/// the amount of data copied is equal to the smallest of the two.
#[inline]
pub fn copy_buffer<S, D, T>(mut self, source: S, destination: D) -> Result<Self, CopyBufferError>
pub fn copy_buffer<S, D, T>(mut self, source: S, destination: D)
-> Result<Self, CopyBufferError>
where S: TypedBufferAccess<Content = T> + Send + Sync + 'static,
D: TypedBufferAccess<Content = T> + Send + Sync + 'static,
T: ?Sized,
T: ?Sized
{
unsafe {
self.ensure_outside_render_pass()?;
let infos = check_copy_buffer(self.device(), &source, &destination)?;
self.inner.copy_buffer(source, destination, iter::once((0, 0, infos.copy_size)))?;
self.inner
.copy_buffer(source, destination, iter::once((0, 0, infos.copy_size)))?;
Ok(self)
}
}
@@ -659,7 +696,7 @@ impl<P> AutoCommandBufferBuilder<P> {
-> Result<Self, CopyBufferImageError>
where S: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static,
Format: AcceptsPixels<Px>,
Format: AcceptsPixels<Px>
{
self.ensure_outside_render_pass()?;
@@ -668,19 +705,26 @@ impl<P> AutoCommandBufferBuilder<P> {
}
/// Adds a command that copies from a buffer to an image.
pub fn copy_buffer_to_image_dimensions<S, D, Px>(
mut self, source: S, destination: D, offset: [u32; 3], size: [u32; 3], first_layer: u32,
num_layers: u32, mipmap: u32) -> Result<Self, CopyBufferImageError>
pub fn copy_buffer_to_image_dimensions<S, D, Px>(mut self, source: S, destination: D,
offset: [u32; 3], size: [u32; 3],
first_layer: u32, num_layers: u32, mipmap: u32)
-> Result<Self, CopyBufferImageError>
where S: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static,
Format: AcceptsPixels<Px>,
Format: AcceptsPixels<Px>
{
unsafe {
self.ensure_outside_render_pass()?;
check_copy_buffer_image(self.device(), &source, &destination,
CheckCopyBufferImageTy::BufferToImage, offset, size,
first_layer, num_layers, mipmap)?;
check_copy_buffer_image(self.device(),
&source,
&destination,
CheckCopyBufferImageTy::BufferToImage,
offset,
size,
first_layer,
num_layers,
mipmap)?;
let copy = UnsafeCommandBufferBuilderBufferImageCopy {
buffer_offset: 0,
@@ -702,7 +746,10 @@ impl<P> AutoCommandBufferBuilder<P> {
image_extent: size,
};
self.inner.copy_buffer_to_image(source, destination, ImageLayout::TransferDstOptimal, // TODO: let choose layout
self.inner
.copy_buffer_to_image(source,
destination,
ImageLayout::TransferDstOptimal, // TODO: let choose layout
iter::once(copy))?;
Ok(self)
}
@@ -713,7 +760,7 @@ impl<P> AutoCommandBufferBuilder<P> {
-> Result<Self, CopyBufferImageError>
where S: ImageAccess + Send + Sync + 'static,
D: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
Format: AcceptsPixels<Px>,
Format: AcceptsPixels<Px>
{
self.ensure_outside_render_pass()?;
@@ -722,19 +769,26 @@ impl<P> AutoCommandBufferBuilder<P> {
}
/// Adds a command that copies from an image to a buffer.
pub fn copy_image_to_buffer_dimensions<S, D, Px>(
mut self, source: S, destination: D, offset: [u32; 3], size: [u32; 3], first_layer: u32,
num_layers: u32, mipmap: u32) -> Result<Self, CopyBufferImageError>
pub fn copy_image_to_buffer_dimensions<S, D, Px>(mut self, source: S, destination: D,
offset: [u32; 3], size: [u32; 3],
first_layer: u32, num_layers: u32, mipmap: u32)
-> Result<Self, CopyBufferImageError>
where S: ImageAccess + Send + Sync + 'static,
D: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
Format: AcceptsPixels<Px>,
Format: AcceptsPixels<Px>
{
unsafe {
self.ensure_outside_render_pass()?;
check_copy_buffer_image(self.device(), &destination, &source,
CheckCopyBufferImageTy::ImageToBuffer, offset, size,
first_layer, num_layers, mipmap)?;
check_copy_buffer_image(self.device(),
&destination,
&source,
CheckCopyBufferImageTy::ImageToBuffer,
offset,
size,
first_layer,
num_layers,
mipmap)?;
let copy = UnsafeCommandBufferBuilderBufferImageCopy {
buffer_offset: 0,
@@ -756,7 +810,10 @@ impl<P> AutoCommandBufferBuilder<P> {
image_extent: size,
};
self.inner.copy_image_to_buffer(source, ImageLayout::TransferSrcOptimal, destination, // TODO: let choose layout
self.inner
.copy_image_to_buffer(source,
ImageLayout::TransferSrcOptimal,
destination, // TODO: let choose layout
iter::once(copy))?;
Ok(self)
}
@@ -785,7 +842,11 @@ impl<P> AutoCommandBufferBuilder<P> {
}
push_constants(&mut self.inner, pipeline.clone(), constants);
descriptor_sets(&mut self.inner, &mut self.state_cacher, false, pipeline.clone(), sets)?;
descriptor_sets(&mut self.inner,
&mut self.state_cacher,
false,
pipeline.clone(),
sets)?;
self.inner.dispatch(dimensions);
Ok(self)
@@ -794,7 +855,8 @@ impl<P> AutoCommandBufferBuilder<P> {
#[inline]
pub fn draw<V, Gp, S, Pc>(mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, sets: S,
constants: Pc) -> Result<Self, DrawError>
constants: Pc)
-> Result<Self, DrawError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection
{
@@ -817,21 +879,28 @@ impl<P> AutoCommandBufferBuilder<P> {
push_constants(&mut self.inner, pipeline.clone(), constants);
set_state(&mut self.inner, dynamic);
descriptor_sets(&mut self.inner, &mut self.state_cacher, true, pipeline.clone(), sets)?;
vertex_buffers(&mut self.inner, &mut self.state_cacher, vb_infos.vertex_buffers)?;
descriptor_sets(&mut self.inner,
&mut self.state_cacher,
true,
pipeline.clone(),
sets)?;
vertex_buffers(&mut self.inner,
&mut self.state_cacher,
vb_infos.vertex_buffers)?;
debug_assert!(self.graphics_allowed);
self.inner
.draw(vb_infos.vertex_count as u32, vb_infos.instance_count as u32, 0, 0);
self.inner.draw(vb_infos.vertex_count as u32,
vb_infos.instance_count as u32,
0,
0);
Ok(self)
}
}
#[inline]
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(
mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, index_buffer: Ib, sets: S,
constants: Pc)
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(mut self, pipeline: Gp, dynamic: DynamicState,
vertices: V, index_buffer: Ib, sets: S, constants: Pc)
-> Result<Self, DrawIndexedError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
@@ -864,13 +933,20 @@ impl<P> AutoCommandBufferBuilder<P> {
push_constants(&mut self.inner, pipeline.clone(), constants);
set_state(&mut self.inner, dynamic);
descriptor_sets(&mut self.inner, &mut self.state_cacher, true, pipeline.clone(), sets)?;
vertex_buffers(&mut self.inner, &mut self.state_cacher, vb_infos.vertex_buffers)?;
descriptor_sets(&mut self.inner,
&mut self.state_cacher,
true,
pipeline.clone(),
sets)?;
vertex_buffers(&mut self.inner,
&mut self.state_cacher,
vb_infos.vertex_buffers)?;
// TODO: how to handle an index out of range of the vertex buffers?
debug_assert!(self.graphics_allowed);
self.inner.draw_indexed(ib_infos.num_indices as u32, 1, 0, 0, 0);
self.inner
.draw_indexed(ib_infos.num_indices as u32, 1, 0, 0, 0);
Ok(self)
}
}
@@ -908,12 +984,19 @@ impl<P> AutoCommandBufferBuilder<P> {
push_constants(&mut self.inner, pipeline.clone(), constants);
set_state(&mut self.inner, dynamic);
descriptor_sets(&mut self.inner, &mut self.state_cacher, true, pipeline.clone(), sets)?;
vertex_buffers(&mut self.inner, &mut self.state_cacher, vb_infos.vertex_buffers)?;
descriptor_sets(&mut self.inner,
&mut self.state_cacher,
true,
pipeline.clone(),
sets)?;
vertex_buffers(&mut self.inner,
&mut self.state_cacher,
vb_infos.vertex_buffers)?;
debug_assert!(self.graphics_allowed);
self.inner.draw_indirect(indirect_buffer,
self.inner
.draw_indirect(indirect_buffer,
draw_count,
mem::size_of::<DrawIndirectCommand>() as u32)?;
Ok(self)
@@ -932,8 +1015,7 @@ impl<P> AutoCommandBufferBuilder<P> {
}
match self.render_pass {
Some((ref rp, index))
if rp.num_subpasses() as u32 == index + 1 => (),
Some((ref rp, index)) if rp.num_subpasses() as u32 == index + 1 => (),
None => {
return Err(AutoCommandBufferBuilderContextError::ForbiddenOutsideRenderPass);
},
@@ -1006,7 +1088,7 @@ impl<P> AutoCommandBufferBuilder<P> {
match self.render_pass {
None => {
return Err(AutoCommandBufferBuilderContextError::ForbiddenOutsideRenderPass)
return Err(AutoCommandBufferBuilderContextError::ForbiddenOutsideRenderPass);
},
Some((ref rp, ref mut index)) => {
if *index + 1 >= rp.num_subpasses() as u32 {
@@ -1024,8 +1106,11 @@ impl<P> AutoCommandBufferBuilder<P> {
debug_assert!(self.graphics_allowed);
let contents = if secondary { SubpassContents::SecondaryCommandBuffers }
else { SubpassContents::Inline };
let contents = if secondary {
SubpassContents::SecondaryCommandBuffers
} else {
SubpassContents::Inline
};
self.inner.next_subpass(contents);
Ok(self)
}
@@ -1110,8 +1195,7 @@ unsafe fn set_state<P>(destination: &mut SyncCommandBufferBuilder<P>, dynamic: D
unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
state_cacher: &mut StateCacher,
vertex_buffers: Vec<Box<BufferAccess + Send + Sync>>)
-> Result<(), SyncCommandBufferBuilderError>
{
-> Result<(), SyncCommandBufferBuilderError> {
let binding_range = {
let mut compare = state_cacher.bind_vertex_buffers();
for vb in vertex_buffers.iter() {
@@ -1119,7 +1203,7 @@ unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
}
match compare.compare() {
Some(r) => r,
None => return Ok(())
None => return Ok(()),
}
};
@@ -1127,7 +1211,11 @@ unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
let num_bindings = binding_range.end - binding_range.start;
let mut binder = destination.bind_vertex_buffers();
for vb in vertex_buffers.into_iter().skip(first_binding as usize).take(num_bindings as usize) {
for vb in vertex_buffers
.into_iter()
.skip(first_binding as usize)
.take(num_bindings as usize)
{
binder.add(vb);
}
binder.submit(first_binding)?;
@@ -1135,8 +1223,8 @@ unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
}
unsafe fn descriptor_sets<P, Pl, S>(destination: &mut SyncCommandBufferBuilder<P>,
state_cacher: &mut StateCacher,
gfx: bool, pipeline: Pl, sets: S)
state_cacher: &mut StateCacher, gfx: bool, pipeline: Pl,
sets: S)
-> Result<(), SyncCommandBufferBuilderError>
where Pl: PipelineLayoutAbstract + Send + Sync + Clone + 'static,
S: DescriptorSetsCollection
@@ -1160,7 +1248,8 @@ unsafe fn descriptor_sets<P, Pl, S>(destination: &mut SyncCommandBufferBuilder<P
for set in sets.into_iter().skip(first_binding as usize) {
sets_binder.add(set);
}
sets_binder.submit(gfx, pipeline.clone(), first_binding, iter::empty())?;
sets_binder
.submit(gfx, pipeline.clone(), first_binding, iter::empty())?;
Ok(())
}
@ -1201,8 +1290,7 @@ unsafe impl<P> CommandBuffer for AutoCommandBuffer<P> {
}
#[inline]
fn lock_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError> {
fn lock_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
match self.submit_state {
SubmitState::OneTime { ref already_submitted } => {
let was_already_submitted = already_submitted.swap(true, Ordering::SeqCst);
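The `OneTime` submit guard above relies on an atomic swap: `swap(true, SeqCst)` returns the previous flag, so only the first caller observes `false` and is allowed to submit. A standalone sketch of the idea (hypothetical error type, not vulkano's actual `SubmitState` machinery):

use std::sync::atomic::{AtomicBool, Ordering};

struct OneTimeGuard {
    already_submitted: AtomicBool,
}

impl OneTimeGuard {
    // Returns Ok exactly once; every later call reports the double submit.
    fn lock_submit(&self) -> Result<(), &'static str> {
        let was_already_submitted = self.already_submitted.swap(true, Ordering::SeqCst);
        if was_already_submitted {
            Err("one-time command buffer was already submitted")
        } else {
            Ok(())
        }
    }
}

fn main() {
    let guard = OneTimeGuard { already_submitted: AtomicBool::new(false) };
    assert!(guard.lock_submit().is_ok());
    assert!(guard.lock_submit().is_err()); // second submission is refused
}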
@ -1279,7 +1367,7 @@ unsafe impl<P> DeviceOwned for AutoCommandBuffer<P> {
}
macro_rules! err_gen {
($name:ident { $($err:ident),+ }) => (
($name:ident { $($err:ident,)+ }) => (
#[derive(Debug, Clone)]
pub enum $name {
$(
@ -1329,41 +1417,41 @@ macro_rules! err_gen {
err_gen!(BuildError {
AutoCommandBufferBuilderContextError,
OomError
OomError,
});
err_gen!(BeginRenderPassError {
AutoCommandBufferBuilderContextError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(BlitImageError {
AutoCommandBufferBuilderContextError,
CheckBlitImageError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(ClearColorImageError {
AutoCommandBufferBuilderContextError,
CheckClearColorImageError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(CopyBufferError {
AutoCommandBufferBuilderContextError,
CheckCopyBufferError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(CopyBufferImageError {
AutoCommandBufferBuilderContextError,
CheckCopyBufferImageError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(FillBufferError {
AutoCommandBufferBuilderContextError,
CheckFillBufferError
CheckFillBufferError,
});
err_gen!(DispatchError {
@ -1371,7 +1459,7 @@ err_gen!(DispatchError {
CheckPushConstantsValidityError,
CheckDescriptorSetsValidityError,
CheckDispatchError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(DrawError {
@ -1380,7 +1468,7 @@ err_gen!(DrawError {
CheckPushConstantsValidityError,
CheckDescriptorSetsValidityError,
CheckVertexBufferError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(DrawIndexedError {
@ -1390,7 +1478,7 @@ err_gen!(DrawIndexedError {
CheckDescriptorSetsValidityError,
CheckVertexBufferError,
CheckIndexBufferError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(DrawIndirectError {
@ -1399,17 +1487,17 @@ err_gen!(DrawIndirectError {
CheckPushConstantsValidityError,
CheckDescriptorSetsValidityError,
CheckVertexBufferError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(ExecuteCommandsError {
AutoCommandBufferBuilderContextError,
SyncCommandBufferBuilderError
SyncCommandBufferBuilderError,
});
err_gen!(UpdateBufferError {
AutoCommandBufferBuilderContextError,
CheckUpdateBufferError
CheckUpdateBufferError,
});
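The functional change hiding in the reformatted `err_gen!` hunks is the matcher itself: `$($err:ident),+` became `$($err:ident,)+`, which makes the trailing comma mandatory and is why every invocation above gains one. A minimal, self-contained sketch of the pattern (a simplification, not vulkano's actual macro body):

macro_rules! err_gen {
    ($name:ident { $($err:ident,)+ }) => {
        #[derive(Debug, Clone)]
        pub enum $name {
            $(
                $err($err),
            )+
        }
        $(
            // Allows the `?` operator to convert each source error
            // into the generated aggregate error.
            impl From<$err> for $name {
                fn from(err: $err) -> $name {
                    $name::$err(err)
                }
            }
        )+
    };
}

#[derive(Debug, Clone)]
pub struct OomError;
#[derive(Debug, Clone)]
pub struct AutoCommandBufferBuilderContextError;

err_gen!(BuildError {
    AutoCommandBufferBuilderContextError,
    OomError, // trailing comma now required by the matcher
});

fn main() {
    let err: BuildError = OomError.into();
    println!("{:?}", err);
}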
#[derive(Debug, Copy, Clone)]

View File

@ -274,12 +274,12 @@ impl Drop for StandardCommandPoolAlloc {
#[cfg(test)]
mod tests {
use VulkanObject;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::StandardCommandPool;
use device::Device;
use std::sync::Arc;
use VulkanObject;
#[test]
fn reuse_command_buffers() {

View File

@ -264,9 +264,7 @@ impl Iterator for UnsafeCommandPoolAllocIter {
#[inline]
fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
self.list
.next()
.map(|cb| UnsafeCommandPoolAlloc(cb))
self.list.next().map(|cb| UnsafeCommandPoolAlloc(cb))
}
#[inline]

View File

@ -7,15 +7,15 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::ops::Range;
use VulkanObject;
use buffer::BufferAccess;
use command_buffer::DynamicState;
use descriptor::DescriptorSet;
use pipeline::input_assembly::IndexType;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use pipeline::input_assembly::IndexType;
use smallvec::SmallVec;
use std::ops::Range;
use vk;
/// Keep track of the state of a command buffer builder, so that you don't need to bind objects
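A state cacher of this kind skips redundant bind commands by remembering what was last bound. A minimal sketch of the idea (hypothetical, and far simpler than vulkano's `StateCacher`, which also diffs vertex-buffer ranges and descriptor sets):

#[derive(Default)]
struct MiniStateCacher {
    bound_pipeline: Option<u64>, // handle of the last pipeline we bound
}

impl MiniStateCacher {
    // Returns true when a real bind command must be recorded.
    fn bind_pipeline(&mut self, handle: u64) -> bool {
        if self.bound_pipeline == Some(handle) {
            return false; // same pipeline as last time: elide the command
        }
        self.bound_pipeline = Some(handle);
        true
    }
}

fn main() {
    let mut cacher = MiniStateCacher::default();
    assert!(cacher.bind_pipeline(42));  // first bind is recorded
    assert!(!cacher.bind_pipeline(42)); // duplicate bind is elided
    assert!(cacher.bind_pipeline(7));   // new handle, recorded again
}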
@ -359,9 +359,9 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();
const EMPTY: [i32; 0] = [];
let buf = CpuAccessibleBuffer::from_data(device,
BufferUsage::vertex_buffer(),
EMPTY.iter()).unwrap();
let buf =
CpuAccessibleBuffer::from_data(device, BufferUsage::vertex_buffer(), EMPTY.iter())
.unwrap();
let mut cacher = StateCacher::new();
@ -383,9 +383,9 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();
const EMPTY: [i32; 0] = [];
let buf = CpuAccessibleBuffer::from_data(device,
BufferUsage::vertex_buffer(),
EMPTY.iter()).unwrap();
let buf =
CpuAccessibleBuffer::from_data(device, BufferUsage::vertex_buffer(), EMPTY.iter())
.unwrap();
let mut cacher = StateCacher::new();
@ -417,13 +417,15 @@ mod tests {
const EMPTY: [i32; 0] = [];
let buf1 = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::vertex_buffer(),
EMPTY.iter()).unwrap();
EMPTY.iter())
.unwrap();
let buf2 = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::vertex_buffer(),
EMPTY.iter()).unwrap();
let buf3 = CpuAccessibleBuffer::from_data(device,
BufferUsage::vertex_buffer(),
EMPTY.iter()).unwrap();
EMPTY.iter())
.unwrap();
let buf3 =
CpuAccessibleBuffer::from_data(device, BufferUsage::vertex_buffer(), EMPTY.iter())
.unwrap();
let mut cacher = StateCacher::new();

View File

@ -401,12 +401,9 @@ impl<P> SyncCommandBufferBuilder<P> {
/// any existing resource usage.
#[inline]
pub unsafe fn from_unsafe_cmd(cmd: UnsafeCommandBufferBuilder<P>, is_secondary: bool,
inside_render_pass: bool) -> SyncCommandBufferBuilder<P> {
let latest_render_pass_enter = if inside_render_pass {
Some(0)
} else {
None
};
inside_render_pass: bool)
-> SyncCommandBufferBuilder<P> {
let latest_render_pass_enter = if inside_render_pass { Some(0) } else { None };
SyncCommandBufferBuilder {
inner: cmd,
@ -494,7 +491,6 @@ impl<P> SyncCommandBufferBuilder<P> {
// throughout the function.
match self.resources.entry(key) {
// Situation where this resource was used before in this command buffer.
Entry::Occupied(entry) => {
// `collision_cmd_id` contains the ID of the command that we are potentially
@ -524,7 +520,9 @@ impl<P> SyncCommandBufferBuilder<P> {
{
let mut commands_lock = self.commands.lock().unwrap();
let start = commands_lock.first_unflushed;
let end = if let Some(rp_enter) = commands_lock.latest_render_pass_enter {
let end = if let Some(rp_enter) = commands_lock
.latest_render_pass_enter
{
rp_enter
} else {
latest_command_id
@ -536,14 +534,17 @@ impl<P> SyncCommandBufferBuilder<P> {
command1_name: cmd1.name(),
command1_param: match entry_key_resource_ty {
KeyTy::Buffer => cmd1.buffer_name(entry_key_resource_index),
KeyTy::Image => cmd1.image_name(entry_key_resource_index),
KeyTy::Image =>
cmd1.image_name(entry_key_resource_index),
},
command1_offset: collision_cmd_id,
command2_name: cmd2.name(),
command2_param: match resource_ty {
KeyTy::Buffer => cmd2.buffer_name(resource_index),
KeyTy::Image => cmd2.image_name(resource_index),
KeyTy::Buffer =>
cmd2.buffer_name(resource_index),
KeyTy::Image =>
cmd2.image_name(resource_index),
},
command2_offset: latest_command_id,
});
@ -617,7 +618,6 @@ impl<P> SyncCommandBufferBuilder<P> {
}
},
// Situation where this is the first time we use this resource in this command buffer.
Entry::Vacant(entry) => {
// We need to perform some tweaks if the initial layout requirement of the image
@ -1123,13 +1123,17 @@ impl<P> SyncCommandBuffer<P> {
KeyTy::Buffer => {
let cmd = &commands_lock[command_id];
let buf = cmd.buffer(resource_index);
unsafe { buf.unlock(); }
unsafe {
buf.unlock();
}
},
KeyTy::Image => {
let cmd = &commands_lock[command_id];
let img = cmd.image(resource_index);
unsafe { img.unlock(); }
unsafe {
img.unlock();
}
},
}
}
@ -1203,8 +1207,8 @@ impl<P> SyncCommandBuffer<P> {
///
/// > **Note**: Suitable when implementing the `CommandBuffer` trait.
#[inline]
pub fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
queue: &Queue)
pub fn check_image_access(
&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
// TODO: check the queue family

View File

@ -312,9 +312,12 @@ impl<P> SyncCommandBufferBuilder<P> {
}
unsafe fn send(&mut self, out: &mut UnsafeCommandBufferBuilder<P>) {
out.blit_image(self.source.as_ref().unwrap(), self.source_layout,
self.destination.as_ref().unwrap(), self.destination_layout,
self.regions.take().unwrap(), self.filter);
out.blit_image(self.source.as_ref().unwrap(),
self.source_layout,
self.destination.as_ref().unwrap(),
self.destination_layout,
self.regions.take().unwrap(),
self.filter);
}
fn into_final_command(mut self: Box<Self>) -> Box<FinalCommand + Send + Sync> {
@ -417,21 +420,26 @@ impl<P> SyncCommandBufferBuilder<P> {
impl<P, I, R> Command<P> for Cmd<I, R>
where I: ImageAccess + Send + Sync + 'static,
R: Iterator<Item = UnsafeCommandBufferBuilderColorImageClear> + Send + Sync + 'static
R: Iterator<Item = UnsafeCommandBufferBuilderColorImageClear>
+ Send
+ Sync
+ 'static
{
fn name(&self) -> &'static str {
"vkCmdClearColorImage"
}
unsafe fn send(&mut self, out: &mut UnsafeCommandBufferBuilder<P>) {
out.clear_color_image(self.image.as_ref().unwrap(), self.layout, self.color,
out.clear_color_image(self.image.as_ref().unwrap(),
self.layout,
self.color,
self.regions.take().unwrap());
}
fn into_final_command(mut self: Box<Self>) -> Box<FinalCommand + Send + Sync> {
struct Fin<I>(I);
impl<I> FinalCommand for Fin<I>
where I: ImageAccess + Send + Sync + 'static,
where I: ImageAccess + Send + Sync + 'static
{
fn image(&self, num: usize) -> &ImageAccess {
assert_eq!(num, 0);
@ -1661,7 +1669,8 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
fn buffer_name(&self, mut num: usize) -> Cow<'static, str> {
for (set_num, set) in self.inner.iter().enumerate() {
if let Some(buf) = set.buffer(num) {
return format!("Buffer bound to descriptor {} of set {}", buf.1, set_num).into();
return format!("Buffer bound to descriptor {} of set {}", buf.1, set_num)
.into();
}
num -= set.num_buffers();
}
@ -1681,7 +1690,8 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
fn image_name(&self, mut num: usize) -> Cow<'static, str> {
for (set_num, set) in self.inner.iter().enumerate() {
if let Some(img) = set.image(num) {
return format!("Image bound to descriptor {} of set {}", img.1, set_num).into();
return format!("Image bound to descriptor {} of set {}", img.1, set_num)
.into();
}
num -= set.num_images();
}
@ -1693,7 +1703,8 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
let mut all_buffers = Vec::new();
for ds in self.inner.iter() {
for buf_num in 0 .. ds.num_buffers() {
let desc = ds.descriptor(ds.buffer(buf_num).unwrap().1 as usize).unwrap();
let desc = ds.descriptor(ds.buffer(buf_num).unwrap().1 as usize)
.unwrap();
let write = !desc.readonly;
let (stages, access) = desc.pipeline_stages_and_access();
all_buffers.push((write, stages, access));
@ -1731,7 +1742,7 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
ignore_me_hack = true;
image_view.descriptor_set_input_attachment_layout()
},
_ => panic!("Tried to bind an image to a non-image descriptor")
_ => panic!("Tried to bind an image to a non-image descriptor"),
};
all_images.push((write, stages, access, layout, ignore_me_hack));
}
@ -1739,8 +1750,7 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
all_images
};
self.builder
.append_command(Cmd {
self.builder.append_command(Cmd {
inner: self.inner,
graphics,
pipeline_layout,
@ -1751,18 +1761,22 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
for (n, (write, stages, access)) in all_buffers.into_iter().enumerate() {
self.builder
.prev_cmd_resource(KeyTy::Buffer,
n, write, stages, access,
n,
write,
stages,
access,
ImageLayout::Undefined,
ImageLayout::Undefined)?;
}
for (n, (write, stages, access, layout, ignore_me_hack)) in all_images.into_iter().enumerate() {
if ignore_me_hack { continue; }
for (n, (write, stages, access, layout, ignore_me_hack)) in
all_images.into_iter().enumerate()
{
if ignore_me_hack {
continue;
}
self.builder
.prev_cmd_resource(KeyTy::Image,
n, write, stages, access,
layout,
layout)?;
.prev_cmd_resource(KeyTy::Image, n, write, stages, access, layout, layout)?;
}
Ok(())
@ -1824,8 +1838,7 @@ impl<'a, P> SyncCommandBufferBuilderBindVertexBuffer<'a, P> {
let num_buffers = self.buffers.len();
self.builder
.append_command(Cmd {
self.builder.append_command(Cmd {
first_binding,
inner: Some(self.inner),
buffers: self.buffers,
@ -1867,7 +1880,8 @@ impl<'a, P> SyncCommandBufferBuilderExecuteCommands<'a, P> {
where C: CommandBuffer + Send + Sync + 'static
{
self.inner.add(&command_buffer);
self.command_buffers.push(Box::new(command_buffer) as Box<_>);
self.command_buffers
.push(Box::new(command_buffer) as Box<_>);
}
#[inline]
@ -1894,8 +1908,7 @@ impl<'a, P> SyncCommandBufferBuilderExecuteCommands<'a, P> {
}
}
self.builder
.append_command(Cmd {
self.builder.append_command(Cmd {
inner: Some(self.inner),
command_buffers: self.command_buffers,
});

View File

@ -34,13 +34,11 @@ fn basic_conflict() {
let pool = Device::standard_command_pool(&device, queue.family());
let mut sync = SyncCommandBufferBuilder::new(&pool, Kind::primary(), Flags::None).unwrap();
let buf = CpuAccessibleBuffer::from_data(device,
BufferUsage::all(),
0u32).unwrap();
let buf = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), 0u32).unwrap();
match sync.copy_buffer(buf.clone(), buf.clone(), iter::once((0, 0, 4))) {
Err(SyncCommandBufferBuilderError::Conflict { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
}

View File

@ -132,8 +132,7 @@ impl
pub fn secondary(occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
-> Kind<Arc<RenderPass<EmptySinglePassRenderPassDesc>>,
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
{
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
Kind::Secondary {
render_pass: None,
occlusion_query,
@ -267,11 +266,15 @@ impl<P> UnsafeCommandBufferBuilder<P> {
};
(rp, sp, fb)
},
_ => (0, 0, 0)
_ => (0, 0, 0),
};
let (oqe, qf, ps) = match kind {
Kind::Secondary { occlusion_query, query_statistics_flags, .. } => {
Kind::Secondary {
occlusion_query,
query_statistics_flags,
..
} => {
let ps: vk::QueryPipelineStatisticFlagBits = query_statistics_flags.into();
debug_assert!(ps == 0 ||
alloc.device().enabled_features().pipeline_statistics_query);
@ -279,8 +282,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let (oqe, qf) = match occlusion_query {
KindOcclusionQuery::Allowed { control_precise_allowed } => {
debug_assert!(alloc.device().enabled_features().inherited_queries);
let qf = if control_precise_allowed { vk::QUERY_CONTROL_PRECISE_BIT }
else { 0 };
let qf = if control_precise_allowed {
vk::QUERY_CONTROL_PRECISE_BIT
} else {
0
};
(vk::TRUE, qf)
},
KindOcclusionQuery::Forbidden => (0, 0),
@ -288,7 +294,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
(oqe, qf, ps)
},
_ => (0, 0, 0)
_ => (0, 0, 0),
};
let inheritance = vk::CommandBufferInheritanceInfo {
@ -387,22 +393,28 @@ impl<P> UnsafeCommandBufferBuilder<P> {
vk::ClearValue { color: vk::ClearColorValue { uint32: val } }
},
ClearValue::Depth(val) => {
vk::ClearValue { depthStencil: vk::ClearDepthStencilValue {
vk::ClearValue {
depthStencil: vk::ClearDepthStencilValue {
depth: val,
stencil: 0,
}}
},
}
},
ClearValue::Stencil(val) => {
vk::ClearValue { depthStencil: vk::ClearDepthStencilValue {
vk::ClearValue {
depthStencil: vk::ClearDepthStencilValue {
depth: 0.0,
stencil: val,
}}
},
}
},
ClearValue::DepthStencil((depth, stencil)) => {
vk::ClearValue { depthStencil: vk::ClearDepthStencilValue {
vk::ClearValue {
depthStencil: vk::ClearDepthStencilValue {
depth: depth,
stencil: stencil,
}}
},
}
},
})
.collect();
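Each arm above fills a different field of `vk::ClearValue` because the FFI type mirrors the C `VkClearValue` union: color and depth/stencil clears share the same storage. A hypothetical minimal reproduction of that mapping (not vulkano's actual ffi definitions):

#[repr(C)]
#[derive(Clone, Copy)]
struct ClearDepthStencilValue {
    depth: f32,
    stencil: u32,
}

#[repr(C)]
union RawClearValue {
    color: [f32; 4],
    depth_stencil: ClearDepthStencilValue,
}

enum ClearValue {
    Float([f32; 4]),
    DepthStencil((f32, u32)),
}

fn to_raw(value: ClearValue) -> RawClearValue {
    match value {
        ClearValue::Float(val) => RawClearValue { color: val },
        ClearValue::DepthStencil((depth, stencil)) => RawClearValue {
            depth_stencil: ClearDepthStencilValue { depth, stencil },
        },
    }
}

fn main() {
    let v = to_raw(ClearValue::DepthStencil((1.0, 0)));
    // Reading back a union field is unsafe: we must name the field we wrote.
    unsafe { assert_eq!(v.depth_stencil.depth, 1.0) };
    let _ = to_raw(ClearValue::Float([0.0; 4]));
}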
@ -586,8 +598,10 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let regions: SmallVec<[_; 8]> = regions
.filter_map(|blit| {
// TODO: not everything is checked here
debug_assert!(blit.source_base_array_layer + blit.layer_count <= source.num_layers as u32);
debug_assert!(blit.destination_base_array_layer + blit.layer_count <= destination.num_layers as u32);
debug_assert!(blit.source_base_array_layer + blit.layer_count <=
source.num_layers as u32);
debug_assert!(blit.destination_base_array_layer + blit.layer_count <=
destination.num_layers as u32);
debug_assert!(blit.source_mip_level < destination.num_mipmap_levels as u32);
debug_assert!(blit.destination_mip_level < destination.num_mipmap_levels as u32);
@ -612,12 +626,13 @@ impl<P> UnsafeCommandBufferBuilder<P> {
x: blit.source_bottom_right[0],
y: blit.source_bottom_right[1],
z: blit.source_bottom_right[2],
}
},
],
dstSubresource: vk::ImageSubresourceLayers {
aspectMask: blit.aspect.to_vk_bits(),
mipLevel: blit.destination_mip_level,
baseArrayLayer: blit.destination_base_array_layer + destination.first_layer as u32,
baseArrayLayer: blit.destination_base_array_layer +
destination.first_layer as u32,
layerCount: blit.layer_count,
},
dstOffsets: [
@ -630,7 +645,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
x: blit.destination_bottom_right[0],
y: blit.destination_bottom_right[1],
z: blit.destination_bottom_right[2],
}
},
],
})
})
@ -642,9 +657,14 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdBlitImage(cmd, source.image.internal_object(), source_layout as u32,
destination.image.internal_object(), destination_layout as u32,
regions.len() as u32, regions.as_ptr(), filter as u32);
vk.CmdBlitImage(cmd,
source.image.internal_object(),
source_layout as u32,
destination.image.internal_object(),
destination_layout as u32,
regions.len() as u32,
regions.as_ptr(),
filter as u32);
}
// TODO: missing structs
@ -686,8 +706,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let image = image.inner();
debug_assert!(image.image.usage_transfer_destination());
debug_assert!(layout == ImageLayout::General ||
layout == ImageLayout::TransferDstOptimal);
debug_assert!(layout == ImageLayout::General || layout == ImageLayout::TransferDstOptimal);
let color = match color {
ClearValue::Float(val) => {
@ -706,8 +725,10 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let regions: SmallVec<[_; 8]> = regions
.filter_map(|region| {
debug_assert!(region.layer_count + region.base_array_layer <= image.num_layers as u32);
debug_assert!(region.level_count + region.base_mip_level <= image.num_mipmap_levels as u32);
debug_assert!(region.layer_count + region.base_array_layer <=
image.num_layers as u32);
debug_assert!(region.level_count + region.base_mip_level <=
image.num_mipmap_levels as u32);
if region.layer_count == 0 || region.level_count == 0 {
return None;
@ -813,7 +834,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
imageSubresource: vk::ImageSubresourceLayers {
aspectMask: copy.image_aspect.to_vk_bits(),
mipLevel: copy.image_mip_level + destination.first_mipmap_level as u32,
baseArrayLayer: copy.image_base_array_layer + destination.first_layer as u32,
baseArrayLayer: copy.image_base_array_layer +
destination.first_layer as u32,
layerCount: copy.image_layer_count,
},
imageOffset: vk::Offset3D {
@ -911,8 +933,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
/// Calls `vkCmdCopyQueryPoolResults` on the builder.
#[inline]
pub unsafe fn copy_query_pool_results(&mut self, queries: UnsafeQueriesRange,
destination: &BufferAccess, stride: usize)
{
destination: &BufferAccess, stride: usize) {
let destination = destination.inner();
debug_assert!(destination.offset < destination.buffer.size());
debug_assert!(destination.buffer.usage_transfer_destination());
@ -921,10 +942,14 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdCopyQueryPoolResults(cmd, queries.pool().internal_object(), queries.first_index(),
queries.count(), destination.buffer.internal_object(),
vk.CmdCopyQueryPoolResults(cmd,
queries.pool().internal_object(),
queries.first_index(),
queries.count(),
destination.buffer.internal_object(),
destination.offset as vk::DeviceSize,
stride as vk::DeviceSize, flags);
stride as vk::DeviceSize,
flags);
}
/// Calls `vkCmdDispatch` on the builder.
@ -1167,7 +1192,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn reset_query_pool(&mut self, queries: UnsafeQueriesRange) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdResetQueryPool(cmd, queries.pool().internal_object(), queries.first_index(),
vk.CmdResetQueryPool(cmd,
queries.pool().internal_object(),
queries.first_index(),
queries.count());
}
@ -1338,7 +1365,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn write_timestamp(&mut self, query: UnsafeQuery, stages: PipelineStages) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdWriteTimestamp(cmd, stages.into_vulkan_bits(), query.pool().internal_object(),
vk.CmdWriteTimestamp(cmd,
stages.into_vulkan_bits(),
query.pool().internal_object(),
query.index());
}
}
@ -1706,7 +1735,8 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
image: image.image.internal_object(),
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmaps.start + image.first_mipmap_level as u32,
baseMipLevel: mipmaps.start +
image.first_mipmap_level as u32,
levelCount: mipmaps.end - mipmaps.start,
baseArrayLayer: layers.start + image.first_layer as u32,
layerCount: layers.end - layers.start,
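The arithmetic above translates a view-local subresource range into the parent image's coordinates: the view's first mip level and first layer are added as base offsets. A standalone sketch of that remapping (hypothetical types):

use std::ops::Range;

struct ViewOffsets {
    first_mipmap_level: u32,
    first_layer: u32,
}

// Maps view-local mip/layer ranges onto the parent image.
fn remap(view: &ViewOffsets, mipmaps: Range<u32>, layers: Range<u32>) -> (Range<u32>, Range<u32>) {
    let base_mip = mipmaps.start + view.first_mipmap_level;
    let base_layer = layers.start + view.first_layer;
    (base_mip .. base_mip + (mipmaps.end - mipmaps.start),
     base_layer .. base_layer + (layers.end - layers.start))
}

fn main() {
    let view = ViewOffsets { first_mipmap_level: 2, first_layer: 4 };
    let (mips, layers) = remap(&view, 0 .. 3, 1 .. 2);
    assert_eq!(mips, 2 .. 5);   // levels 0..3 of the view are levels 2..5 of the image
    assert_eq!(layers, 5 .. 6); // layer 1 of the view is layer 5 of the image
}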

View File

@ -54,8 +54,7 @@ pub unsafe trait CommandBuffer: DeviceOwned {
///
/// If you call this function, then you should call `unlock` afterwards.
// TODO: require `&mut self` instead, but this has some consequences on other parts of the lib
fn lock_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError>;
fn lock_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError>;
/// Unlocks the command buffer. Should be called once for each call to `lock_submit`.
///
@ -171,8 +170,7 @@ unsafe impl<T> CommandBuffer for T
}
#[inline]
fn lock_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError> {
fn lock_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
(**self).lock_submit(future, queue)
}
@ -391,7 +389,7 @@ impl error::Error for CommandBufferExecError {
fn cause(&self) -> Option<&error::Error> {
match *self {
CommandBufferExecError::AccessError(ref err) => Some(err),
_ => None
_ => None,
}
}
}

View File

@ -27,9 +27,10 @@ use sampler::Filter;
///
pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32; 3],
source_bottom_right: [i32; 3], source_base_array_layer: u32,
source_mip_level: u32, destination: &D, destination_top_left: [i32; 3],
destination_bottom_right: [i32; 3], destination_base_array_layer: u32,
destination_mip_level: u32, layer_count: u32, filter: Filter)
source_mip_level: u32, destination: &D,
destination_top_left: [i32; 3], destination_bottom_right: [i32; 3],
destination_base_array_layer: u32, destination_mip_level: u32,
layer_count: u32, filter: Filter)
-> Result<(), CheckBlitImageError>
where S: ?Sized + ImageAccess,
D: ?Sized + ImageAccess
@ -81,7 +82,7 @@ pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32
if types_should_be_same && (source_format_ty != destination_format_ty) {
return Err(CheckBlitImageError::IncompatibleFormatsTypes {
source_format_ty: source.format().ty(),
destination_format_ty: destination.format().ty()
destination_format_ty: destination.format().ty(),
});
}
@ -90,7 +91,9 @@ pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32
None => return Err(CheckBlitImageError::SourceCoordinatesOutOfRange),
};
let destination_dimensions = match destination.dimensions().mipmap_dimensions(destination_mip_level) {
let destination_dimensions = match destination
.dimensions()
.mipmap_dimensions(destination_mip_level) {
Some(d) => d,
None => return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange),
};
@ -127,27 +130,39 @@ pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32
return Err(CheckBlitImageError::SourceCoordinatesOutOfRange);
}
if destination_top_left[0] < 0 || destination_top_left[0] > destination_dimensions.width() as i32 {
if destination_top_left[0] < 0 ||
destination_top_left[0] > destination_dimensions.width() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}
if destination_top_left[1] < 0 || destination_top_left[1] > destination_dimensions.height() as i32 {
if destination_top_left[1] < 0 ||
destination_top_left[1] > destination_dimensions.height() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}
if destination_top_left[2] < 0 || destination_top_left[2] > destination_dimensions.depth() as i32 {
if destination_top_left[2] < 0 ||
destination_top_left[2] > destination_dimensions.depth() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}
if destination_bottom_right[0] < 0 || destination_bottom_right[0] > destination_dimensions.width() as i32 {
if destination_bottom_right[0] < 0 ||
destination_bottom_right[0] > destination_dimensions.width() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}
if destination_bottom_right[1] < 0 || destination_bottom_right[1] > destination_dimensions.height() as i32 {
if destination_bottom_right[1] < 0 ||
destination_bottom_right[1] > destination_dimensions.height() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}
if destination_bottom_right[2] < 0 || destination_bottom_right[2] > destination_dimensions.depth() as i32 {
if destination_bottom_right[2] < 0 ||
destination_bottom_right[2] > destination_dimensions.depth() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}

View File

@ -23,7 +23,7 @@ use image::ImageAccess;
pub fn check_clear_color_image<I>(device: &Device, image: &I, first_layer: u32, num_layers: u32,
first_mipmap: u32, num_mipmaps: u32)
-> Result<(), CheckClearColorImageError>
where I: ?Sized + ImageAccess,
where I: ?Sized + ImageAccess
{
assert_eq!(image.inner().image.device().internal_object(),
device.internal_object());

View File

@ -26,7 +26,7 @@ pub fn check_copy_buffer<S, D, T>(device: &Device, source: &S, destination: &D)
-> Result<CheckCopyBuffer, CheckCopyBufferError>
where S: ?Sized + TypedBufferAccess<Content = T>,
D: ?Sized + TypedBufferAccess<Content = T>,
T: ?Sized,
T: ?Sized
{
assert_eq!(source.inner().buffer.device().internal_object(),
device.internal_object());

View File

@ -41,7 +41,7 @@ pub fn check_copy_buffer_image<B, I, P>(device: &Device, buffer: &B, image: &I,
-> Result<(), CheckCopyBufferImageError>
where I: ?Sized + ImageAccess,
B: ?Sized + TypedBufferAccess<Content = [P]>,
Format: AcceptsPixels<P>, // TODO: use a trait on the image itself instead
Format: AcceptsPixels<P> // TODO: use a trait on the image itself instead
{
let buffer_inner = buffer.inner();
let image_inner = image.inner();
@ -170,7 +170,7 @@ impl error::Error for CheckCopyBufferImageError {
CheckCopyBufferImageError::WrongPixelType(ref err) => {
Some(err)
},
_ => None
_ => None,
}
}
}

View File

@ -17,7 +17,7 @@ use descriptor::pipeline_layout::PipelineLayoutDesc;
pub fn check_descriptor_sets_validity<Pl, D>(pipeline: &Pl, descriptor_sets: &D)
-> Result<(), CheckDescriptorSetsValidityError>
where Pl: ?Sized + PipelineLayoutDesc,
D: ?Sized + DescriptorSetsCollection,
D: ?Sized + DescriptorSetsCollection
{
// What's important is not that the pipeline layout and the descriptor sets *match*. Instead
// what's important is that the descriptor sets are a superset of the pipeline layout. It's not
@ -30,7 +30,8 @@ pub fn check_descriptor_sets_validity<Pl, D>(pipeline: &Pl, descriptor_sets: &D)
let (set_desc, pipeline_desc) = match (set_desc, pipeline_desc) {
(Some(s), Some(p)) => (s, p),
(None, Some(_)) => return Err(CheckDescriptorSetsValidityError::MissingDescriptor {
(None, Some(_)) =>
return Err(CheckDescriptorSetsValidityError::MissingDescriptor {
set_num: set_num,
binding_num: binding_num,
}),
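The superset rule described in the comment above means extra descriptors in the bound sets are fine, but anything the pipeline layout declares must be present and of the right type. A toy illustration of the rule (hypothetical string-typed descriptors, nothing like the real trait machinery):

fn check_superset(
    sets: &[Vec<Option<&'static str>>],     // sets[set][binding] -> bound descriptor type
    pipeline: &[Vec<Option<&'static str>>], // what the pipeline layout requires
) -> Result<(), String> {
    for (set_num, bindings) in pipeline.iter().enumerate() {
        for (binding_num, required) in bindings.iter().enumerate() {
            let required = match required {
                Some(r) => r,
                None => continue, // the pipeline doesn't use this slot
            };
            match sets.get(set_num).and_then(|s| s.get(binding_num)).and_then(|d| *d) {
                Some(bound) if bound == *required => (),
                Some(bound) => return Err(format!("set {} binding {}: expected {}, got {}",
                                                  set_num, binding_num, required, bound)),
                None => return Err(format!("missing descriptor: set {} binding {}",
                                           set_num, binding_num)),
            }
        }
    }
    Ok(())
}

fn main() {
    // The bound sets carry an extra sampler the pipeline never reads: allowed.
    let sets = vec![vec![Some("uniform_buffer"), Some("sampler")]];
    let pipeline = vec![vec![Some("uniform_buffer")]];
    assert!(check_superset(&sets, &pipeline).is_ok());
}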

View File

@ -14,7 +14,10 @@ use device::Device;
/// Checks whether the dispatch dimensions are supported by the device.
pub fn check_dispatch(device: &Device, dimensions: [u32; 3]) -> Result<(), CheckDispatchError> {
let max = device.physical_device().limits().max_compute_work_group_count();
let max = device
.physical_device()
.limits()
.max_compute_work_group_count();
if dimensions[0] > max[0] || dimensions[1] > max[1] || dimensions[2] > max[2] {
return Err(CheckDispatchError::UnsupportedDimensions {
@ -67,7 +70,11 @@ mod tests {
let attempted = [u32::max_value(), u32::max_value(), u32::max_value()];
// Just in case the device is some kind of software implementation.
if device.physical_device().limits().max_compute_work_group_count() == attempted {
if device
.physical_device()
.limits()
.max_compute_work_group_count() == attempted
{
return;
}
@ -75,7 +82,7 @@ mod tests {
Err(validity::CheckDispatchError::UnsupportedDimensions { requested, .. }) => {
assert_eq!(requested, attempted);
},
_ => panic!()
_ => panic!(),
}
}
}
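`check_dispatch` boils down to an element-wise comparison against `maxComputeWorkGroupCount`. A free-standing sketch of the same check (the limit value in `main` is only an example; real limits come from the physical device):

#[derive(Debug)]
enum CheckDispatchError {
    UnsupportedDimensions { requested: [u32; 3] },
}

fn check_dispatch(max: [u32; 3], dimensions: [u32; 3]) -> Result<(), CheckDispatchError> {
    // Every axis of the requested workgroup count must fit the device limit.
    if dimensions[0] > max[0] || dimensions[1] > max[1] || dimensions[2] > max[2] {
        return Err(CheckDispatchError::UnsupportedDimensions { requested: dimensions });
    }
    Ok(())
}

fn main() {
    let max = [65_535, 65_535, 65_535]; // a common limit, assumed for the example
    assert!(check_dispatch(max, [1024, 1, 1]).is_ok());
    assert!(check_dispatch(max, [u32::max_value(), 1, 1]).is_err());
}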

View File

@ -70,19 +70,20 @@ impl fmt::Display for CheckFillBufferError {
#[cfg(test)]
mod tests {
use super::*;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use super::*;
#[test]
fn missing_usage() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(),
0u32).unwrap();
let buffer =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(), 0u32)
.unwrap();
match check_fill_buffer(&device, &buffer) {
Err(CheckFillBufferError::BufferMissingUsage) => (),
_ => panic!()
_ => panic!(),
}
}

View File

@ -40,9 +40,7 @@ pub fn check_index_buffer<B, I>(device: &Device, buffer: &B)
// TODO: fullDrawIndexUint32 feature
Ok(CheckIndexBuffer {
num_indices: buffer.len(),
})
Ok(CheckIndexBuffer { num_indices: buffer.len() })
}
/// Information returned if `check_index_buffer` succeeds.
@ -89,33 +87,37 @@ impl fmt::Display for CheckIndexBufferError {
#[cfg(test)]
mod tests {
use super::*;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use super::*;
#[test]
fn num_indices() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::index_buffer(),
0 .. 500u32).unwrap();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::index_buffer(),
0 .. 500u32)
.unwrap();
match check_index_buffer(&device, &buffer) {
Ok(CheckIndexBuffer { num_indices }) => {
assert_eq!(num_indices, 500);
},
_ => panic!()
_ => panic!(),
}
}
#[test]
fn missing_usage() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::vertex_buffer(),
0 .. 500u32).unwrap();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::vertex_buffer(),
0 .. 500u32)
.unwrap();
match check_index_buffer(&device, &buffer) {
Err(CheckIndexBufferError::BufferMissingUsage) => (),
_ => panic!()
_ => panic!(),
}
}
@ -124,8 +126,7 @@ mod tests {
let (dev1, queue) = gfx_dev_and_queue!();
let (dev2, _) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_iter(dev1, BufferUsage::all(),
0 .. 500u32).unwrap();
let buffer = CpuAccessibleBuffer::from_iter(dev1, BufferUsage::all(), 0 .. 500u32).unwrap();
assert_should_panic!({
let _ = check_index_buffer(&dev2, &buffer);

View File

@ -9,18 +9,19 @@
//! Functions that check the validity of commands.
pub use self::blit_image::{check_blit_image, CheckBlitImageError};
pub use self::clear_color_image::{check_clear_color_image, CheckClearColorImageError};
pub use self::copy_buffer::{CheckCopyBufferError, check_copy_buffer, CheckCopyBuffer};
pub use self::copy_image_buffer::{CheckCopyBufferImageError, check_copy_buffer_image, CheckCopyBufferImageTy};
pub use self::descriptor_sets::{check_descriptor_sets_validity, CheckDescriptorSetsValidityError};
pub use self::dispatch::{check_dispatch, CheckDispatchError};
pub use self::blit_image::{CheckBlitImageError, check_blit_image};
pub use self::clear_color_image::{CheckClearColorImageError, check_clear_color_image};
pub use self::copy_buffer::{CheckCopyBuffer, CheckCopyBufferError, check_copy_buffer};
pub use self::copy_image_buffer::{CheckCopyBufferImageError, CheckCopyBufferImageTy,
check_copy_buffer_image};
pub use self::descriptor_sets::{CheckDescriptorSetsValidityError, check_descriptor_sets_validity};
pub use self::dispatch::{CheckDispatchError, check_dispatch};
pub use self::dynamic_state::{CheckDynamicStateValidityError, check_dynamic_state_validity};
pub use self::fill_buffer::{CheckFillBufferError, check_fill_buffer};
pub use self::index_buffer::{check_index_buffer, CheckIndexBuffer, CheckIndexBufferError};
pub use self::push_constants::{check_push_constants_validity, CheckPushConstantsValidityError};
pub use self::index_buffer::{CheckIndexBuffer, CheckIndexBufferError, check_index_buffer};
pub use self::push_constants::{CheckPushConstantsValidityError, check_push_constants_validity};
pub use self::update_buffer::{CheckUpdateBufferError, check_update_buffer};
pub use self::vertex_buffers::{check_vertex_buffers, CheckVertexBuffer, CheckVertexBufferError};
pub use self::vertex_buffers::{CheckVertexBuffer, CheckVertexBufferError, check_vertex_buffers};
mod blit_image;
mod clear_color_image;

View File

@ -17,7 +17,7 @@ use descriptor::pipeline_layout::PipelineLayoutPushConstantsCompatible;
pub fn check_push_constants_validity<Pl, Pc>(pipeline: &Pl, push_constants: &Pc)
-> Result<(), CheckPushConstantsValidityError>
where Pl: ?Sized + PipelineLayoutAbstract + PipelineLayoutPushConstantsCompatible<Pc>,
Pc: ?Sized,
Pc: ?Sized
{
if !pipeline.is_compatible(push_constants) {
return Err(CheckPushConstantsValidityError::IncompatiblePushConstants);

View File

@ -87,59 +87,66 @@ impl fmt::Display for CheckUpdateBufferError {
#[cfg(test)]
mod tests {
use super::*;
use buffer::BufferAccess;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use super::*;
#[test]
fn missing_usage() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(),
0u32).unwrap();
let buffer =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(), 0u32)
.unwrap();
match check_update_buffer(&device, &buffer, &0) {
Err(CheckUpdateBufferError::BufferMissingUsage) => (),
_ => panic!()
_ => panic!(),
}
}
#[test]
fn data_too_large() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::transfer_destination(),
0 .. 65536).unwrap();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::transfer_destination(),
0 .. 65536)
.unwrap();
let data = (0 .. 65536).collect::<Vec<u32>>();
match check_update_buffer(&device, &buffer, &data[..]) {
Err(CheckUpdateBufferError::DataTooLarge) => (),
_ => panic!()
_ => panic!(),
}
}
#[test]
fn data_just_large_enough() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::transfer_destination(),
(0 .. 100000).map(|_| 0)).unwrap();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::transfer_destination(),
(0 .. 100000).map(|_| 0))
.unwrap();
let data = (0 .. 65536).map(|_| 0).collect::<Vec<u8>>();
match check_update_buffer(&device, &buffer, &data[..]) {
Ok(_) => (),
_ => panic!()
_ => panic!(),
}
}
#[test]
fn wrong_alignment() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::transfer_destination(),
0 .. 100).unwrap();
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::transfer_destination(),
0 .. 100)
.unwrap();
let data = (0 .. 30).collect::<Vec<u8>>();
match check_update_buffer(&device, &buffer.slice(1 .. 50).unwrap(), &data[..]) {
Err(CheckUpdateBufferError::WrongAlignment) => (),
_ => panic!()
_ => panic!(),
}
}
@ -147,8 +154,7 @@ mod tests {
fn wrong_device() {
let (dev1, queue) = gfx_dev_and_queue!();
let (dev2, _) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_data(dev1, BufferUsage::all(),
0u32).unwrap();
let buffer = CpuAccessibleBuffer::from_data(dev1, BufferUsage::all(), 0u32).unwrap();
assert_should_panic!({
let _ = check_update_buffer(&dev2, &buffer, &0);

View File

@ -43,10 +43,10 @@
use format::Format;
use image::Dimensions;
use sync::AccessFlagBits;
use sync::PipelineStages;
use std::cmp;
use std::ops::BitOr;
use sync::AccessFlagBits;
use sync::PipelineStages;
use vk;
/// Contains the exact description of a single descriptor.
@ -114,7 +114,8 @@ impl DescriptorDesc {
let access = match self.ty {
DescriptorDescTy::Sampler => panic!(),
DescriptorDescTy::CombinedImageSampler(_) | DescriptorDescTy::Image(_) => {
DescriptorDescTy::CombinedImageSampler(_) |
DescriptorDescTy::Image(_) => {
AccessFlagBits {
shader_read: true,
shader_write: !self.readonly,

View File

@ -7,27 +7,27 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use crossbeam::sync::SegQueue;
use std::sync::Arc;
use OomError;
use buffer::BufferAccess;
use buffer::BufferViewRef;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::persistent::*;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::DescriptorPoolAllocError;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::descriptor_set::persistent::*;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
use image::ImageViewAccess;
use sampler::Sampler;
use OomError;
/// Pool of descriptor sets of a specific capacity and that are automatically reclaimed.
///
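"Automatically reclaimed" here means a set returns to the pool when dropped rather than being destroyed. A std-only sketch of that recycling pattern (hypothetical; vulkano uses a lock-free `SegQueue` rather than a `Mutex`):

use std::sync::{Arc, Mutex};

struct Pool {
    free: Arc<Mutex<Vec<u32>>>, // stand-in for recycled descriptor sets
}

struct PooledSet {
    value: u32,
    home: Arc<Mutex<Vec<u32>>>,
}

impl Pool {
    fn alloc(&self) -> PooledSet {
        // Reuse a free set if there is one, otherwise "create" a new one.
        let value = self.free.lock().unwrap().pop().unwrap_or(0);
        PooledSet { value, home: self.free.clone() }
    }
}

impl Drop for PooledSet {
    fn drop(&mut self) {
        // Reclaim: hand the set back to the pool instead of destroying it.
        self.home.lock().unwrap().push(self.value);
    }
}

fn main() {
    let pool = Pool { free: Arc::new(Mutex::new(vec![1, 2, 3])) };
    {
        let set = pool.alloc();
        assert_eq!(set.value, 3);
    } // dropped here: 3 goes back into the free list
    assert_eq!(pool.alloc().value, 3);
}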
@ -55,7 +55,8 @@ impl<L> FixedSizeDescriptorSetsPool<L> {
let device = layout.device().clone();
let set_layout = layout.descriptor_set_layout(set_id)
let set_layout = layout
.descriptor_set_layout(set_id)
.expect("Unable to get the descriptor set layout")
.clone();
@ -89,7 +90,7 @@ impl<L> FixedSizeDescriptorSetsPool<L> {
/// A descriptor set created from a `FixedSizeDescriptorSetsPool`.
pub struct FixedSizeDescriptorSet<L, R> {
inner: PersistentDescriptorSet<L, R, LocalPoolAlloc>
inner: PersistentDescriptorSet<L, R, LocalPoolAlloc>,
}
unsafe impl<L, R> DescriptorSet for FixedSizeDescriptorSet<L, R>
@ -198,8 +199,8 @@ unsafe impl DescriptorPool for LocalPool {
// If we failed to grab an existing set, that means the current pool is full. Create a
// new one of larger capacity.
let count = *layout.descriptors_count() * self.next_capacity;
let mut new_pool = UnsafeDescriptorPool::new(self.device.clone(), &count,
self.next_capacity, false)?;
let mut new_pool =
UnsafeDescriptorPool::new(self.device.clone(), &count, self.next_capacity, false)?;
let alloc = unsafe {
match new_pool.alloc((0 .. self.next_capacity).map(|_| layout)) {
Ok(iter) => {
@ -278,9 +279,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
#[inline]
pub fn build(self) -> Result<FixedSizeDescriptorSet<L, R>, PersistentDescriptorSetBuildError> {
let inner = self.inner.build_with_pool(&mut self.pool.pool)?;
Ok(FixedSizeDescriptorSet {
inner: inner,
})
Ok(FixedSizeDescriptorSet { inner: inner })
}
/// Call this function if the next element of the set is an array in order to set the value of
@ -291,19 +290,23 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
/// This function can be called even if the descriptor isn't an array, and it is valid to enter
/// the "array", add one element, then leave.
#[inline]
pub fn enter_array(self) -> Result<FixedSizeDescriptorSetBuilderArray<'a, L, R>, PersistentDescriptorSetError> {
pub fn enter_array(
self)
-> Result<FixedSizeDescriptorSetBuilderArray<'a, L, R>, PersistentDescriptorSetError> {
Ok(FixedSizeDescriptorSetBuilderArray {
pool: self.pool,
inner: self.inner.enter_array()?
inner: self.inner.enter_array()?,
})
}
/// Skips the current descriptor if it is empty.
#[inline]
pub fn add_empty(self) -> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
pub fn add_empty(
self)
-> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.add_empty()?
inner: self.inner.add_empty()?,
})
}
@ -317,12 +320,16 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
///
#[inline]
pub fn add_buffer<T>(self, buffer: T)
-> Result<FixedSizeDescriptorSetBuilder<'a, L, (R, PersistentDescriptorSetBuf<T>)>, PersistentDescriptorSetError>
-> Result<FixedSizeDescriptorSetBuilder<'a,
L,
(R,
PersistentDescriptorSetBuf<T>)>,
PersistentDescriptorSetError>
where T: BufferAccess
{
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.add_buffer(buffer)?
inner: self.inner.add_buffer(buffer)?,
})
}
@ -340,7 +347,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
{
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.add_buffer_view(view)?
inner: self.inner.add_buffer_view(view)?,
})
}
@ -354,12 +361,15 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
///
#[inline]
pub fn add_image<T>(self, image_view: T)
-> Result<FixedSizeDescriptorSetBuilder<'a, L, (R, PersistentDescriptorSetImg<T>)>, PersistentDescriptorSetError>
-> Result<FixedSizeDescriptorSetBuilder<'a,
L,
(R, PersistentDescriptorSetImg<T>)>,
PersistentDescriptorSetError>
where T: ImageViewAccess
{
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.add_image(image_view)?
inner: self.inner.add_image(image_view)?,
})
}
@ -378,7 +388,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
{
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.add_sampled_image(image_view, sampler)?
inner: self.inner.add_sampled_image(image_view, sampler)?,
})
}
@ -392,11 +402,13 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
///
#[inline]
pub fn add_sampler(self, sampler: Arc<Sampler>)
-> Result<FixedSizeDescriptorSetBuilder<'a, L, (R, PersistentDescriptorSetSampler)>, PersistentDescriptorSetError>
{
-> Result<FixedSizeDescriptorSetBuilder<'a,
L,
(R, PersistentDescriptorSetSampler)>,
PersistentDescriptorSetError> {
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.add_sampler(sampler)?
inner: self.inner.add_sampler(sampler)?,
})
}
}
@ -407,12 +419,16 @@ pub struct FixedSizeDescriptorSetBuilderArray<'a, L: 'a, R> {
inner: PersistentDescriptorSetBuilderArray<L, R>,
}
impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLayoutAbstract {
impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R>
where L: PipelineLayoutAbstract
{
/// Leaves the array. Call this once you added all the elements of the array.
pub fn leave_array(self) -> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
pub fn leave_array(
self)
-> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
Ok(FixedSizeDescriptorSetBuilder {
pool: self.pool,
inner: self.inner.leave_array()?
inner: self.inner.leave_array()?,
})
}
@ -430,7 +446,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
{
Ok(FixedSizeDescriptorSetBuilderArray {
pool: self.pool,
inner: self.inner.add_buffer(buffer)?
inner: self.inner.add_buffer(buffer)?,
})
}
@ -448,7 +464,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
{
Ok(FixedSizeDescriptorSetBuilderArray {
pool: self.pool,
inner: self.inner.add_buffer_view(view)?
inner: self.inner.add_buffer_view(view)?,
})
}
@ -466,7 +482,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
{
Ok(FixedSizeDescriptorSetBuilderArray {
pool: self.pool,
inner: self.inner.add_image(image_view)?
inner: self.inner.add_image(image_view)?,
})
}
@ -484,7 +500,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
{
Ok(FixedSizeDescriptorSetBuilderArray {
pool: self.pool,
inner: self.inner.add_sampled_image(image_view, sampler)?
inner: self.inner.add_sampled_image(image_view, sampler)?,
})
}
@ -501,7 +517,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
{
Ok(FixedSizeDescriptorSetBuilderArray {
pool: self.pool,
inner: self.inner.add_sampler(sampler)?
inner: self.inner.add_sampler(sampler)?,
})
}
}

View File

@ -41,16 +41,16 @@ use descriptor::descriptor::DescriptorDesc;
use image::ImageViewAccess;
pub use self::collection::DescriptorSetsCollection;
pub use self::fixed_size_pool::FixedSizeDescriptorSetsPool;
pub use self::fixed_size_pool::FixedSizeDescriptorSet;
pub use self::fixed_size_pool::FixedSizeDescriptorSetBuilder;
pub use self::fixed_size_pool::FixedSizeDescriptorSetBuilderArray;
pub use self::fixed_size_pool::FixedSizeDescriptorSetsPool;
pub use self::persistent::PersistentDescriptorSet;
pub use self::persistent::PersistentDescriptorSetBuf;
pub use self::persistent::PersistentDescriptorSetBufView;
pub use self::persistent::PersistentDescriptorSetBuildError;
pub use self::persistent::PersistentDescriptorSetBuilder;
pub use self::persistent::PersistentDescriptorSetBuilderArray;
pub use self::persistent::PersistentDescriptorSetBuf;
pub use self::persistent::PersistentDescriptorSetBufView;
pub use self::persistent::PersistentDescriptorSetError;
pub use self::persistent::PersistentDescriptorSetImg;
pub use self::persistent::PersistentDescriptorSetSampler;

View File

@ -11,6 +11,8 @@ use std::error;
use std::fmt;
use std::sync::Arc;
use OomError;
use VulkanObject;
use buffer::BufferAccess;
use buffer::BufferViewRef;
use descriptor::descriptor::DescriptorDesc;
@ -19,22 +21,20 @@ use descriptor::descriptor::DescriptorImageDesc;
use descriptor::descriptor::DescriptorImageDescArray;
use descriptor::descriptor::DescriptorImageDescDimensions;
use descriptor::descriptor::DescriptorType;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::DescriptorWrite;
use descriptor::descriptor_set::StdDescriptorPoolAlloc;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
use format::Format;
use image::ImageViewAccess;
use sampler::Sampler;
use OomError;
use VulkanObject;
/// An immutable descriptor set that is expected to be long-lived.
///
@ -59,7 +59,7 @@ pub struct PersistentDescriptorSet<L, R, P = StdDescriptorPoolAlloc> {
resources: R,
pipeline_layout: L,
set_id: usize,
layout: Arc<UnsafeDescriptorSetLayout>
layout: Arc<UnsafeDescriptorSetLayout>,
}
impl<L> PersistentDescriptorSet<L, ()> {
@ -122,7 +122,9 @@ unsafe impl<L, R, P> DescriptorSetDesc for PersistentDescriptorSet<L, R, P>
{
#[inline]
fn num_bindings(&self) -> usize {
self.pipeline_layout.num_bindings_in_set(self.set_id).unwrap()
self.pipeline_layout
.num_bindings_in_set(self.set_id)
.unwrap()
}
#[inline]
@ -167,7 +169,9 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
{
/// Builds a `PersistentDescriptorSet` from the builder.
#[inline]
pub fn build(self) -> Result<PersistentDescriptorSet<L, R, StdDescriptorPoolAlloc>, PersistentDescriptorSetBuildError> {
pub fn build(self)
-> Result<PersistentDescriptorSet<L, R, StdDescriptorPoolAlloc>,
PersistentDescriptorSetBuildError> {
let mut pool = Device::standard_descriptor_pool(self.layout.device());
self.build_with_pool(&mut pool)
}
@ -178,7 +182,8 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
///
/// Panics if the pool doesn't have the same device as the pipeline layout.
///
pub fn build_with_pool<P>(self, pool: &mut P)
pub fn build_with_pool<P>(
self, pool: &mut P)
-> Result<PersistentDescriptorSet<L, R, P::Alloc>, PersistentDescriptorSetBuildError>
where P: ?Sized + DescriptorPool
{
@ -196,13 +201,15 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
debug_assert_eq!(expected_desc, self.binding_id);
let set_layout = self.layout.descriptor_set_layout(self.set_id)
let set_layout = self.layout
.descriptor_set_layout(self.set_id)
.expect("Unable to get the descriptor set layout")
.clone();
let set = unsafe {
let mut set = pool.alloc(&set_layout)?;
set.inner_mut().write(pool.device(), self.writes.into_iter());
set.inner_mut()
.write(pool.device(), self.writes.into_iter());
set
};
@ -223,7 +230,9 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
/// This function can be called even if the descriptor isn't an array, and it is valid to enter
/// the "array", add one element, then leave.
#[inline]
pub fn enter_array(self) -> Result<PersistentDescriptorSetBuilderArray<L, R>, PersistentDescriptorSetError> {
pub fn enter_array(
self)
-> Result<PersistentDescriptorSetBuilderArray<L, R>, PersistentDescriptorSetError> {
let desc = match self.layout.descriptor(self.set_id, self.binding_id) {
Some(d) => d,
None => return Err(PersistentDescriptorSetError::EmptyExpected),
@ -238,11 +247,13 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
/// Skips the current descriptor if it is empty.
#[inline]
pub fn add_empty(mut self) -> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
pub fn add_empty(
mut self)
-> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
match self.layout.descriptor(self.set_id, self.binding_id) {
None => (),
Some(desc) => return Err(PersistentDescriptorSetError::WrongDescriptorTy {
expected: desc.ty.ty().unwrap()
expected: desc.ty.ty().unwrap(),
}),
}
@ -260,12 +271,13 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
///
#[inline]
pub fn add_buffer<T>(self, buffer: T)
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetBuf<T>)>, PersistentDescriptorSetError>
-> Result<PersistentDescriptorSetBuilder<L,
(R,
PersistentDescriptorSetBuf<T>)>,
PersistentDescriptorSetError>
where T: BufferAccess
{
self.enter_array()?
.add_buffer(buffer)?
.leave_array()
self.enter_array()?.add_buffer(buffer)?.leave_array()
}
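The return type above shows the builder's growing-tuple trick: every `add_*` call wraps the previous resource list `R` in another tuple layer, so the finished set statically owns each resource it references. A stripped-down sketch of the pattern (hypothetical types):

struct Builder<R> {
    resources: R,
}

struct Buf(&'static str);

impl<R> Builder<R> {
    // Each call changes the builder's type: Builder<R> -> Builder<(R, Buf)>.
    fn add_buffer(self, buf: Buf) -> Builder<(R, Buf)> {
        Builder { resources: (self.resources, buf) }
    }

    fn build(self) -> Set<R> {
        Set { _resources: self.resources }
    }
}

struct Set<R> {
    _resources: R, // kept alive for as long as the set is alive
}

fn main() {
    let set: Set<(((), Buf), Buf)> = Builder { resources: () }
        .add_buffer(Buf("uniforms"))
        .add_buffer(Buf("lights"))
        .build();
    let _ = set;
}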
/// Binds a buffer view as the next descriptor.
@ -280,9 +292,7 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetBufView<T>)>, PersistentDescriptorSetError>
where T: BufferViewRef
{
self.enter_array()?
.add_buffer_view(view)?
.leave_array()
self.enter_array()?.add_buffer_view(view)?.leave_array()
}
/// Binds an image view as the next descriptor.
@ -295,12 +305,13 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
///
#[inline]
pub fn add_image<T>(self, image_view: T)
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetImg<T>)>, PersistentDescriptorSetError>
-> Result<PersistentDescriptorSetBuilder<L,
(R,
PersistentDescriptorSetImg<T>)>,
PersistentDescriptorSetError>
where T: ImageViewAccess
{
self.enter_array()?
.add_image(image_view)?
.leave_array()
self.enter_array()?.add_image(image_view)?.leave_array()
}
/// Binds an image view with a sampler as the next descriptor.
@ -331,11 +342,11 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
///
#[inline]
pub fn add_sampler(self, sampler: Arc<Sampler>)
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetSampler)>, PersistentDescriptorSetError>
{
self.enter_array()?
.add_sampler(sampler)?
.leave_array()
-> Result<PersistentDescriptorSetBuilder<L,
(R,
PersistentDescriptorSetSampler)>,
PersistentDescriptorSetError> {
self.enter_array()?.add_sampler(sampler)?.leave_array()
}
}
@ -349,9 +360,13 @@ pub struct PersistentDescriptorSetBuilderArray<L, R> {
desc: DescriptorDesc,
}
impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbstract {
impl<L, R> PersistentDescriptorSetBuilderArray<L, R>
where L: PipelineLayoutAbstract
{
/// Leaves the array. Call this once you added all the elements of the array.
pub fn leave_array(mut self) -> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
pub fn leave_array(
mut self)
-> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
if self.desc.array_count > self.array_element as u32 {
return Err(PersistentDescriptorSetError::MissingArrayElements {
expected: self.desc.array_count,
@ -395,7 +410,11 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
// TODO: eventually shouldn't be an assert ; for now robust_buffer_access is always
// enabled so this assert should never fail in practice, but we put it anyway
// in case we forget to adjust this code
assert!(self.builder.layout.device().enabled_features().robust_buffer_access);
assert!(self.builder
.layout
.device()
.enabled_features()
.robust_buffer_access);
if buffer_desc.storage {
if !buffer.inner().buffer.usage_storage_buffer() {
@ -404,7 +423,8 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
unsafe {
DescriptorWrite::storage_buffer(self.builder.binding_id as u32,
self.array_element as u32, &buffer)
self.array_element as u32,
&buffer)
}
} else {
if !buffer.inner().buffer.usage_uniform_buffer() {
@ -413,13 +433,14 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
unsafe {
DescriptorWrite::uniform_buffer(self.builder.binding_id as u32,
self.array_element as u32, &buffer)
self.array_element as u32,
&buffer)
}
}
},
ref d => {
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
expected: d.ty().unwrap()
expected: d.ty().unwrap(),
});
},
});
@ -430,10 +451,11 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
set_id: self.builder.set_id,
binding_id: self.builder.binding_id,
writes: self.builder.writes,
resources: (self.builder.resources, PersistentDescriptorSetBuf {
resources: (self.builder.resources,
PersistentDescriptorSetBuf {
buffer: buffer,
descriptor_num: self.builder.binding_id as u32,
})
}),
},
desc: self.desc,
array_element: self.array_element + 1,
@ -483,7 +505,7 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
},
ref d => {
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
expected: d.ty().unwrap()
expected: d.ty().unwrap(),
});
},
});
@ -494,10 +516,11 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
set_id: self.builder.set_id,
binding_id: self.builder.binding_id,
writes: self.builder.writes,
resources: (self.builder.resources, PersistentDescriptorSetBufView {
resources: (self.builder.resources,
PersistentDescriptorSetBufView {
view: view,
descriptor_num: self.builder.binding_id as u32,
})
}),
},
desc: self.desc,
array_element: self.array_element + 1,
@ -523,7 +546,9 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
return Err(PersistentDescriptorSetError::ArrayOutOfBounds);
}
let desc = match self.builder.layout.descriptor(self.builder.set_id, self.builder.binding_id) {
let desc = match self.builder
.layout
.descriptor(self.builder.set_id, self.builder.binding_id) {
Some(d) => d,
None => return Err(PersistentDescriptorSetError::EmptyExpected),
};
@ -533,12 +558,19 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
image_match_desc(&image_view, &desc)?;
if desc.sampled {
DescriptorWrite::sampled_image(self.builder.binding_id as u32, self.array_element as u32, &image_view)
DescriptorWrite::sampled_image(self.builder.binding_id as u32,
self.array_element as u32,
&image_view)
} else {
DescriptorWrite::storage_image(self.builder.binding_id as u32, self.array_element as u32, &image_view)
DescriptorWrite::storage_image(self.builder.binding_id as u32,
self.array_element as u32,
&image_view)
}
},
DescriptorDescTy::InputAttachment { multisampled, array_layers } => {
DescriptorDescTy::InputAttachment {
multisampled,
array_layers,
} => {
if !image_view.parent().inner().image.usage_input_attachment() {
return Err(PersistentDescriptorSetError::MissingUsage);
}
@ -557,24 +589,29 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
expected: 1,
obtained: image_layers,
})
});
}
},
DescriptorImageDescArray::Arrayed { max_layers: Some(max_layers) } => {
if image_layers > max_layers { // TODO: is this correct? "max" layers? or is it in fact min layers?
if image_layers > max_layers {
// TODO: is this correct? "max" layers? or is it in fact min layers?
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
expected: max_layers,
obtained: image_layers,
})
});
}
},
DescriptorImageDescArray::Arrayed { max_layers: None } => {},
};
DescriptorWrite::input_attachment(self.builder.binding_id as u32, self.array_element as u32, &image_view)
DescriptorWrite::input_attachment(self.builder.binding_id as u32,
self.array_element as u32,
&image_view)
},
ty => {
return Err(PersistentDescriptorSetError::WrongDescriptorTy { expected: ty.ty().unwrap() });
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
expected: ty.ty().unwrap(),
});
},
});
@ -584,10 +621,11 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
set_id: self.builder.set_id,
binding_id: self.builder.binding_id,
writes: self.builder.writes,
resources: (self.builder.resources, PersistentDescriptorSetImg {
resources: (self.builder.resources,
PersistentDescriptorSetImg {
image: image_view,
descriptor_num: self.builder.binding_id as u32,
})
}),
},
desc: self.desc,
array_element: self.array_element + 1,
@ -615,7 +653,9 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
return Err(PersistentDescriptorSetError::ArrayOutOfBounds);
}
let desc = match self.builder.layout.descriptor(self.builder.set_id, self.builder.binding_id) {
let desc = match self.builder
.layout
.descriptor(self.builder.set_id, self.builder.binding_id) {
Some(d) => d,
None => return Err(PersistentDescriptorSetError::EmptyExpected),
};
@ -627,10 +667,15 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
self.builder.writes.push(match desc.ty {
DescriptorDescTy::CombinedImageSampler(ref desc) => {
image_match_desc(&image_view, &desc)?;
DescriptorWrite::combined_image_sampler(self.builder.binding_id as u32, self.array_element as u32, &sampler, &image_view)
DescriptorWrite::combined_image_sampler(self.builder.binding_id as u32,
self.array_element as u32,
&sampler,
&image_view)
},
ty => {
return Err(PersistentDescriptorSetError::WrongDescriptorTy { expected: ty.ty().unwrap() });
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
expected: ty.ty().unwrap(),
});
},
});
@ -640,12 +685,12 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
set_id: self.builder.set_id,
binding_id: self.builder.binding_id,
writes: self.builder.writes,
resources: ((self.builder.resources, PersistentDescriptorSetImg {
resources: ((self.builder.resources,
PersistentDescriptorSetImg {
image: image_view,
descriptor_num: self.builder.binding_id as u32,
}), PersistentDescriptorSetSampler {
sampler: sampler,
}),
PersistentDescriptorSetSampler { sampler: sampler }),
},
desc: self.desc,
array_element: self.array_element + 1,
@ -670,17 +715,23 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
return Err(PersistentDescriptorSetError::ArrayOutOfBounds);
}
let desc = match self.builder.layout.descriptor(self.builder.set_id, self.builder.binding_id) {
let desc = match self.builder
.layout
.descriptor(self.builder.set_id, self.builder.binding_id) {
Some(d) => d,
None => return Err(PersistentDescriptorSetError::EmptyExpected),
};
self.builder.writes.push(match desc.ty {
DescriptorDescTy::Sampler => {
DescriptorWrite::sampler(self.builder.binding_id as u32, self.array_element as u32, &sampler)
DescriptorWrite::sampler(self.builder.binding_id as u32,
self.array_element as u32,
&sampler)
},
ty => {
return Err(PersistentDescriptorSetError::WrongDescriptorTy { expected: ty.ty().unwrap() });
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
expected: ty.ty().unwrap(),
});
},
});
@ -690,9 +741,8 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
set_id: self.builder.set_id,
binding_id: self.builder.binding_id,
writes: self.builder.writes,
resources: (self.builder.resources, PersistentDescriptorSetSampler {
sampler: sampler,
}),
resources: (self.builder.resources,
PersistentDescriptorSetSampler { sampler: sampler }),
},
desc: self.desc,
array_element: self.array_element + 1,
@ -744,15 +794,16 @@ fn image_match_desc<I>(image_view: &I, desc: &DescriptorImageDesc)
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
expected: 1,
obtained: image_layers,
})
});
}
},
DescriptorImageDescArray::Arrayed { max_layers: Some(max_layers) } => {
if image_layers > max_layers { // TODO: is this correct? "max" layers? or is it in fact min layers?
if image_layers > max_layers {
// TODO: is this correct? "max" layers? or is it in fact min layers?
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
expected: max_layers,
obtained: image_layers,
})
});
}
},
DescriptorImageDescArray::Arrayed { max_layers: None } => {},
@ -798,7 +849,7 @@ pub struct PersistentDescriptorSetBuf<B> {
unsafe impl<R, B> PersistentDescriptorSetResources for (R, PersistentDescriptorSetBuf<B>)
where R: PersistentDescriptorSetResources,
B: BufferAccess,
B: BufferAccess
{
#[inline]
fn num_buffers(&self) -> usize {
@ -837,7 +888,7 @@ pub struct PersistentDescriptorSetBufView<V>
unsafe impl<R, V> PersistentDescriptorSetResources for (R, PersistentDescriptorSetBufView<V>)
where R: PersistentDescriptorSetResources,
V: BufferViewRef,
V: BufferViewRef
{
#[inline]
fn num_buffers(&self) -> usize {
@ -874,7 +925,7 @@ pub struct PersistentDescriptorSetImg<I> {
unsafe impl<R, I> PersistentDescriptorSetResources for (R, PersistentDescriptorSetImg<I>)
where R: PersistentDescriptorSetResources,
I: ImageViewAccess,
I: ImageViewAccess
{
#[inline]
fn num_buffers(&self) -> usize {

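For orientation, a minimal sketch of how the builder reformatted above is driven, mirroring the compute-pipeline test later in this commit (assumes a `pipeline` and a `data_buffer` already exist; not part of the diff itself):

// Hedged sketch: one add_* call per binding in layout order, then build the
// immutable set. Types are checked against the pipeline layout at each step.
let set = PersistentDescriptorSet::start(pipeline.clone(), 0)
    .add_buffer(data_buffer.clone())
    .unwrap()
    .build()
    .unwrap();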

@ -180,14 +180,14 @@ impl Drop for StdDescriptorPoolAlloc {
#[cfg(test)]
mod tests {
use std::iter;
use std::sync::Arc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::StdDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use std::iter;
use std::sync::Arc;
#[test]
fn desc_pool_kept_alive() {
@ -200,7 +200,8 @@ mod tests {
stages: ShaderStages::all(),
readonly: false,
};
let layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(desc))).unwrap();
let layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(desc)))
.unwrap();
let mut pool = Arc::new(StdDescriptorPool::new(device));
let pool_weak = Arc::downgrade(&pool);


@ -829,8 +829,7 @@ impl DescriptorWrite {
}
#[inline]
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
view: &BufferView<F, B>)
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &BufferView<F, B>)
-> DescriptorWrite
where B: BufferAccess
{
@ -844,8 +843,7 @@ impl DescriptorWrite {
}
#[inline]
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
view: &BufferView<F, B>)
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &BufferView<F, B>)
-> DescriptorWrite
where B: BufferAccess
{
@ -865,9 +863,19 @@ impl DescriptorWrite {
let size = buffer.size();
let BufferInner { buffer, offset } = buffer.inner();
debug_assert_eq!(offset % buffer.device().physical_device().limits()
.min_uniform_buffer_offset_alignment() as usize, 0);
debug_assert!(size <= buffer.device().physical_device().limits()
debug_assert_eq!(offset %
buffer
.device()
.physical_device()
.limits()
.min_uniform_buffer_offset_alignment() as
usize,
0);
debug_assert!(size <=
buffer
.device()
.physical_device()
.limits()
.max_uniform_buffer_range() as usize);
DescriptorWrite {
@ -888,9 +896,19 @@ impl DescriptorWrite {
let size = buffer.size();
let BufferInner { buffer, offset } = buffer.inner();
debug_assert_eq!(offset % buffer.device().physical_device().limits()
.min_storage_buffer_offset_alignment() as usize, 0);
debug_assert!(size <= buffer.device().physical_device().limits()
debug_assert_eq!(offset %
buffer
.device()
.physical_device()
.limits()
.min_storage_buffer_offset_alignment() as
usize,
0);
debug_assert!(size <=
buffer
.device()
.physical_device()
.limits()
.max_storage_buffer_range() as usize);
DescriptorWrite {
@ -912,9 +930,19 @@ impl DescriptorWrite {
let size = buffer.size();
let BufferInner { buffer, offset } = buffer.inner();
debug_assert_eq!(offset % buffer.device().physical_device().limits()
.min_uniform_buffer_offset_alignment() as usize, 0);
debug_assert!(size <= buffer.device().physical_device().limits()
debug_assert_eq!(offset %
buffer
.device()
.physical_device()
.limits()
.min_uniform_buffer_offset_alignment() as
usize,
0);
debug_assert!(size <=
buffer
.device()
.physical_device()
.limits()
.max_uniform_buffer_range() as usize);
DescriptorWrite {
@ -934,9 +962,19 @@ impl DescriptorWrite {
let size = buffer.size();
let BufferInner { buffer, offset } = buffer.inner();
debug_assert_eq!(offset % buffer.device().physical_device().limits()
.min_storage_buffer_offset_alignment() as usize, 0);
debug_assert!(size <= buffer.device().physical_device().limits()
debug_assert_eq!(offset %
buffer
.device()
.physical_device()
.limits()
.min_storage_buffer_offset_alignment() as
usize,
0);
debug_assert!(size <=
buffer
.device()
.physical_device()
.limits()
.max_storage_buffer_range() as usize);
DescriptorWrite {
@ -1085,7 +1123,8 @@ mod tests {
assert_should_panic!("Tried to allocate from a pool with a set layout \
of a different device",
{
let mut pool = UnsafeDescriptorPool::new(device2, &desc, 10, false).unwrap();
let mut pool =
UnsafeDescriptorPool::new(device2, &desc, 10, false).unwrap();
unsafe {
let _ = pool.alloc(iter::once(&set_layout));

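The reflowed `debug_assert_eq!`/`debug_assert!` pairs above all check one invariant: the buffer's offset must be a multiple of the device's minimum offset alignment, and its size must stay within the maximum range. A hedged standalone restatement (illustrative names, not the crate's API):

// Mirrors the checks in the debug asserts above, for a uniform or storage buffer.
fn buffer_binding_ok(offset: usize, size: usize, min_align: usize, max_range: usize) -> bool {
    offset % min_align == 0 && size <= max_range
}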

@ -55,10 +55,12 @@ pub fn check_desc_against_limits<D>(desc: &D, limits: Limits)
num_samplers.increment(descriptor.array_count, &descriptor.stages);
num_sampled_images.increment(descriptor.array_count, &descriptor.stages);
},
DescriptorType::SampledImage | DescriptorType::UniformTexelBuffer => {
DescriptorType::SampledImage |
DescriptorType::UniformTexelBuffer => {
num_sampled_images.increment(descriptor.array_count, &descriptor.stages);
},
DescriptorType::StorageImage | DescriptorType::StorageTexelBuffer => {
DescriptorType::StorageImage |
DescriptorType::StorageTexelBuffer => {
num_storage_images.increment(descriptor.array_count, &descriptor.stages);
},
DescriptorType::UniformBuffer => {
@ -183,11 +185,7 @@ pub fn check_desc_against_limits<D>(desc: &D, limits: Limits)
}
for pc_id in 0 .. desc.num_push_constants_ranges() {
let PipelineLayoutDescPcRange {
offset,
size,
..
} = {
let PipelineLayoutDescPcRange { offset, size, .. } = {
match desc.push_constants_range(pc_id) {
Some(o) => o,
None => continue,
@ -361,19 +359,29 @@ impl error::Error for PipelineLayoutLimitsError {
PipelineLayoutLimitsError::MaxPerStageDescriptorSamplersLimitExceeded { .. } => {
"the `max_per_stage_descriptor_samplers()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxPerStageDescriptorUniformBuffersLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxPerStageDescriptorUniformBuffersLimitExceeded {
..
} => {
"the `max_per_stage_descriptor_uniform_buffers()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageBuffersLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageBuffersLimitExceeded {
..
} => {
"the `max_per_stage_descriptor_storage_buffers()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxPerStageDescriptorSampledImagesLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxPerStageDescriptorSampledImagesLimitExceeded {
..
} => {
"the `max_per_stage_descriptor_sampled_images()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageImagesLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageImagesLimitExceeded {
..
} => {
"the `max_per_stage_descriptor_storage_images()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxPerStageDescriptorInputAttachmentsLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxPerStageDescriptorInputAttachmentsLimitExceeded {
..
} => {
"the `max_per_stage_descriptor_input_attachments()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxDescriptorSetSamplersLimitExceeded { .. } => {
@ -382,13 +390,17 @@ impl error::Error for PipelineLayoutLimitsError {
PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersLimitExceeded { .. } => {
"the `max_descriptor_set_uniform_buffers()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersDynamicLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersDynamicLimitExceeded {
..
} => {
"the `max_descriptor_set_uniform_buffers_dynamic()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersLimitExceeded { .. } => {
"the `max_descriptor_set_storage_buffers()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersDynamicLimitExceeded { .. } => {
PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersDynamicLimitExceeded {
..
} => {
"the `max_descriptor_set_storage_buffers_dynamic()` limit has been exceeded"
},
PipelineLayoutLimitsError::MaxDescriptorSetSampledImagesLimitExceeded { .. } => {
@ -426,22 +438,46 @@ struct Counter {
impl Counter {
fn increment(&mut self, num: u32, stages: &ShaderStages) {
self.total += num;
if stages.compute { self.compute += num; }
if stages.vertex { self.vertex += num; }
if stages.tessellation_control { self.tess_ctl += num; }
if stages.tessellation_evaluation { self.tess_eval += num; }
if stages.geometry { self.geometry += num; }
if stages.fragment { self.frag += num; }
if stages.compute {
self.compute += num;
}
if stages.vertex {
self.vertex += num;
}
if stages.tessellation_control {
self.tess_ctl += num;
}
if stages.tessellation_evaluation {
self.tess_eval += num;
}
if stages.geometry {
self.geometry += num;
}
if stages.fragment {
self.frag += num;
}
}
fn max_per_stage(&self) -> u32 {
let mut max = 0;
if self.compute > max { max = self.compute; }
if self.vertex > max { max = self.vertex; }
if self.geometry > max { max = self.geometry; }
if self.tess_ctl > max { max = self.tess_ctl; }
if self.tess_eval > max { max = self.tess_eval; }
if self.frag > max { max = self.frag; }
if self.compute > max {
max = self.compute;
}
if self.vertex > max {
max = self.vertex;
}
if self.geometry > max {
max = self.geometry;
}
if self.tess_ctl > max {
max = self.tess_ctl;
}
if self.tess_eval > max {
max = self.tess_eval;
}
if self.frag > max {
max = self.frag;
}
max
}
}

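As an aside, the `if` cascades that rustfmt expanded in `Counter` could equally be collapsed with iterators; a hedged alternative sketch for inside `impl Counter`, not part of this commit:

// Equivalent to max_per_stage above: the largest of the per-stage counts.
fn max_per_stage(&self) -> u32 {
    [self.compute, self.vertex, self.geometry, self.tess_ctl, self.tess_eval, self.frag]
        .iter()
        .cloned()
        .max()
        .unwrap_or(0)
}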

@ -17,11 +17,11 @@ use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorSetsCollection;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::limits_check;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
use descriptor::pipeline_layout::PipelineLayoutSys;
use descriptor::pipeline_layout::limits_check;
use device::Device;
use device::DeviceOwned;
@ -103,8 +103,7 @@ pub unsafe trait PipelineLayoutDesc {
/// Checks whether this description fulfills the device limits requirements.
#[inline]
fn check_against_limits(&self, device: &Device)
-> Result<(), limits_check::PipelineLayoutLimitsError>
{
-> Result<(), limits_check::PipelineLayoutLimitsError> {
limits_check::check_desc_against_limits(self, device.physical_device().limits())
}

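Calling the trait method reflowed above is a one-liner; a hedged sketch assuming some layout description `desc` and a `device: &Device`:

// Propagates a PipelineLayoutLimitsError if any device limit would be exceeded.
desc.check_against_limits(&device)?;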

@ -168,8 +168,8 @@ impl Device {
///
// TODO: return Arc<Queue> and handle synchronization in the Queue
// TODO: should take the PhysicalDevice by value
pub fn new<'a, I, Ext>(phys: PhysicalDevice, requested_features: &Features,
extensions: Ext, queue_families: I)
pub fn new<'a, I, Ext>(phys: PhysicalDevice, requested_features: &Features, extensions: Ext,
queue_families: I)
-> Result<(Arc<Device>, QueuesIter), DeviceCreationError>
where I: IntoIterator<Item = (QueueFamily<'a>, f32)>,
Ext: Into<RawDeviceExtensions>
@ -296,7 +296,8 @@ impl Device {
*const _
});
let device = Arc::new(Device {
let device =
Arc::new(Device {
instance: phys.instance().clone(),
physical_device: phys.index(),
device: device,
@ -310,8 +311,7 @@ impl Device {
..requested_features.clone()
},
extensions: (&extensions).into(),
active_queue_families: output_queues.iter()
.map(|&(q, _)| q).collect(),
active_queue_families: output_queues.iter().map(|&(q, _)| q).collect(),
allocation_count: Mutex::new(0),
fence_pool: Mutex::new(Vec::new()),
semaphore_pool: Mutex::new(Vec::new()),
@ -367,9 +367,11 @@ impl Device {
/// > **Note**: Will return `-> impl ExactSizeIterator<Item = QueueFamily>` in the future.
// TODO: ^
#[inline]
pub fn active_queue_families<'a>(&'a self) -> Box<ExactSizeIterator<Item = QueueFamily<'a>> + 'a> {
pub fn active_queue_families<'a>(&'a self)
-> Box<ExactSizeIterator<Item = QueueFamily<'a>> + 'a> {
let physical_device = self.physical_device();
Box::new(self.active_queue_families.iter()
Box::new(self.active_queue_families
.iter()
.map(move |&id| physical_device.queue_family_by_id(id).unwrap()))
}

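For context, the reordered `Device::new` arguments above are usually supplied as in the vulkano examples of this era; a hedged sketch (assumes `phys` and `queue_family` were already chosen):

// One queue with priority 0.5; no optional features or extensions requested.
let (device, mut queues) = Device::new(phys,
                                       &Features::none(),
                                       &DeviceExtensions::none(),
                                       Some((queue_family, 0.5))).unwrap();
let queue = queues.next().unwrap();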

@ -102,8 +102,8 @@
//! // TODO: storage formats
//!
use std::{error, fmt, mem};
use std::vec::IntoIter as VecIntoIter;
use std::{mem, error, fmt};
use half::f16;
@ -141,7 +141,9 @@ pub struct IncompatiblePixelsType;
impl error::Error for IncompatiblePixelsType {
#[inline]
fn description(&self) -> &str { "supplied pixels' type is incompatible with this format" }
fn description(&self) -> &str {
"supplied pixels' type is incompatible with this format"
}
}
impl fmt::Display for IncompatiblePixelsType {
@ -166,7 +168,9 @@ pub unsafe trait AcceptsPixels<T> {
/// # Panics
///
/// May panic if `ensure_accepts` would not return `Ok(())`.
fn rate(&self) -> u32 { 1 }
fn rate(&self) -> u32 {
1
}
}
macro_rules! formats {
@ -780,7 +784,7 @@ impl FormatTy {
FormatTy::Depth => true,
FormatTy::Stencil => true,
FormatTy::DepthStencil => true,
_ => false
_ => false,
}
}
}


@ -71,7 +71,12 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
if ds == attachment_num {
// Was normally checked by the render pass.
debug_assert!(image.parent().has_depth() || image.parent().has_stencil());
if !image.parent().inner().image.usage_depth_stencil_attachment() {
if !image
.parent()
.inner()
.image
.usage_depth_stencil_attachment()
{
return Err(IncompatibleRenderPassAttachmentError::MissingDepthStencilAttachmentUsage);
}
}


@ -219,8 +219,7 @@ impl<F> AttachmentImage<F> {
///
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
#[inline]
pub fn sampled_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32,
format: F)
pub fn sampled_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
where F: FormatDesc
{
@ -237,8 +236,8 @@ impl<F> AttachmentImage<F> {
///
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
#[inline]
pub fn sampled_multisampled_input_attachment(device: Arc<Device>, dimensions: [u32; 2],
samples: u32, format: F)
pub fn sampled_multisampled_input_attachment(
device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
where F: FormatDesc
{
@ -312,8 +311,8 @@ impl<F> AttachmentImage<F> {
///
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
#[inline]
pub fn transient_multisampled_input_attachment(device: Arc<Device>, dimensions: [u32; 2],
samples: u32, format: F)
pub fn transient_multisampled_input_attachment(
device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
where F: FormatDesc
{


@ -46,8 +46,8 @@ use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use sync::AccessError;
use sync::Sharing;
use sync::NowFuture;
use sync::Sharing;
/// Image whose purpose is to be used for read-only purposes. You can write to the image once,
/// but then you must only ever read from it.
@ -101,7 +101,13 @@ impl<F> ImmutableImage<F> {
..ImageUsage::none()
};
let (image, _) = ImmutableImage::uninitialized(device, dimensions, format, mipmaps, usage, ImageLayout::ShaderReadOnlyOptimal, queue_families)?;
let (image, _) = ImmutableImage::uninitialized(device,
dimensions,
format,
mipmaps,
usage,
ImageLayout::ShaderReadOnlyOptimal,
queue_families)?;
image.initialized.store(true, Ordering::Relaxed); // Allow uninitialized access for backwards compatibility
Ok(image)
}
@ -109,8 +115,9 @@ impl<F> ImmutableImage<F> {
/// Builds an uninitialized immutable image.
///
/// Returns two things: the image, and a special access that should be used for the initial upload to the image.
pub fn uninitialized<'a, I, M>(device: Arc<Device>, dimensions: Dimensions, format: F,
mipmaps: M, usage: ImageUsage, layout: ImageLayout, queue_families: I)
pub fn uninitialized<'a, I, M>(
device: Arc<Device>, dimensions: Dimensions, format: F, mipmaps: M, usage: ImageUsage,
layout: ImageLayout, queue_families: I)
-> Result<(Arc<ImmutableImage<F>>, ImmutableImageInitialization<F>), ImageCreationError>
where F: FormatDesc,
I: IntoIterator<Item = QueueFamily<'a>>,
@ -184,12 +191,13 @@ impl<F> ImmutableImage<F> {
/// TODO: Support mipmaps
#[inline]
pub fn from_iter<P, I>(iter: I, dimensions: Dimensions, format: F, queue: Arc<Queue>)
-> Result<(Arc<Self>, CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
-> Result<(Arc<Self>,
CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
ImageCreationError>
where P: Send + Sync + Clone + 'static,
F: FormatDesc + AcceptsPixels<P> + 'static + Send + Sync,
I: ExactSizeIterator<Item = P>,
Format: AcceptsPixels<P>,
Format: AcceptsPixels<P>
{
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(),
BufferUsage::transfer_source(),
@ -201,24 +209,41 @@ impl<F> ImmutableImage<F> {
///
/// TODO: Support mipmaps
pub fn from_buffer<B, P>(source: B, dimensions: Dimensions, format: F, queue: Arc<Queue>)
-> Result<(Arc<Self>, CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
-> Result<(Arc<Self>,
CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
ImageCreationError>
where B: BufferAccess + TypedBufferAccess<Content = [P]> + 'static + Clone + Send + Sync,
P: Send + Sync + Clone + 'static,
F: FormatDesc + AcceptsPixels<P> + 'static + Send + Sync,
Format: AcceptsPixels<P>,
Format: AcceptsPixels<P>
{
let usage = ImageUsage { transfer_destination: true, sampled: true, ..ImageUsage::none() };
let usage = ImageUsage {
transfer_destination: true,
sampled: true,
..ImageUsage::none()
};
let layout = ImageLayout::ShaderReadOnlyOptimal;
let (buffer, init) = ImmutableImage::uninitialized(source.device().clone(),
dimensions, format,
MipmapsCount::One, usage, layout,
let (buffer, init) =
ImmutableImage::uninitialized(source.device().clone(),
dimensions,
format,
MipmapsCount::One,
usage,
layout,
source.device().active_queue_families())?;
let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?
.copy_buffer_to_image_dimensions(source, init, [0, 0, 0], dimensions.width_height_depth(), 0, dimensions.array_layers_with_cube(), 0).unwrap()
.build().unwrap();
.copy_buffer_to_image_dimensions(source,
init,
[0, 0, 0],
dimensions.width_height_depth(),
0,
dimensions.array_layers_with_cube(),
0)
.unwrap()
.build()
.unwrap();
let future = match cb.execute(queue) {
Ok(f) => f,
@ -244,7 +269,7 @@ impl<F, A> ImmutableImage<F, A> {
}
unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
where F: 'static + Send + Sync,
where F: 'static + Send + Sync
{
#[inline]
fn inner(&self) -> ImageInner {
@ -286,14 +311,16 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
}
#[inline]
unsafe fn increase_gpu_lock(&self) {}
unsafe fn increase_gpu_lock(&self) {
}
#[inline]
unsafe fn unlock(&self) {}
unsafe fn unlock(&self) {
}
}
unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
where F: 'static + Send + Sync,
where F: 'static + Send + Sync
{
#[inline]
fn matches_format(&self) -> bool {
@ -302,7 +329,7 @@ unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
}
unsafe impl<F: 'static, A> ImageViewAccess for ImmutableImage<F, A>
where F: 'static + Send + Sync,
where F: 'static + Send + Sync
{
#[inline]
fn parent(&self) -> &ImageAccess {
@ -346,7 +373,7 @@ unsafe impl<F: 'static, A> ImageViewAccess for ImmutableImage<F, A>
}
unsafe impl<F, A> ImageAccess for ImmutableImageInitialization<F, A>
where F: 'static + Send + Sync,
where F: 'static + Send + Sync
{
#[inline]
fn inner(&self) -> ImageInner {

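The `from_iter` signature reflowed above is typically invoked like this; a hedged sketch (the 1x1 pixel, format, and `queue` are placeholders):

// Uploads a single RGBA8 texel; returns the image plus a future for the copy.
let (image, init_future) =
    ImmutableImage::from_iter(vec![[0u8, 0, 0, 255]].into_iter(),
                              Dimensions::Dim2d { width: 1, height: 1 },
                              Format::R8G8B8A8Unorm,
                              queue.clone()).unwrap();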

@ -482,7 +482,10 @@ impl ImageDimensions {
}
Some(match *self {
ImageDimensions::Dim1d { width, array_layers } => {
ImageDimensions::Dim1d {
width,
array_layers,
} => {
debug_assert_ne!(width, 0);
ImageDimensions::Dim1d {
array_layers: array_layers,
@ -490,7 +493,12 @@ impl ImageDimensions {
}
},
ImageDimensions::Dim2d { width, height, array_layers, cubemap_compatible } => {
ImageDimensions::Dim2d {
width,
height,
array_layers,
cubemap_compatible,
} => {
debug_assert_ne!(width, 0);
debug_assert_ne!(height, 0);
ImageDimensions::Dim2d {
@ -501,7 +509,11 @@ impl ImageDimensions {
}
},
ImageDimensions::Dim3d { width, height, depth } => {
ImageDimensions::Dim3d {
width,
height,
depth,
} => {
debug_assert_ne!(width, 0);
debug_assert_ne!(height, 0);
ImageDimensions::Dim3d {
@ -520,29 +532,103 @@ mod tests {
#[test]
fn max_mipmaps() {
let dims = ImageDimensions::Dim2d { width: 2, height: 1, cubemap_compatible: false, array_layers: 1 };
let dims = ImageDimensions::Dim2d {
width: 2,
height: 1,
cubemap_compatible: false,
array_layers: 1,
};
assert_eq!(dims.max_mipmaps(), 2);
let dims = ImageDimensions::Dim2d { width: 2, height: 3, cubemap_compatible: false, array_layers: 1 };
let dims = ImageDimensions::Dim2d {
width: 2,
height: 3,
cubemap_compatible: false,
array_layers: 1,
};
assert_eq!(dims.max_mipmaps(), 3);
let dims = ImageDimensions::Dim2d { width: 512, height: 512, cubemap_compatible: false, array_layers: 1 };
let dims = ImageDimensions::Dim2d {
width: 512,
height: 512,
cubemap_compatible: false,
array_layers: 1,
};
assert_eq!(dims.max_mipmaps(), 10);
}
#[test]
fn mipmap_dimensions() {
let dims = ImageDimensions::Dim2d { width: 283, height: 175, cubemap_compatible: false, array_layers: 1 };
let dims = ImageDimensions::Dim2d {
width: 283,
height: 175,
cubemap_compatible: false,
array_layers: 1,
};
assert_eq!(dims.mipmap_dimensions(0), Some(dims));
assert_eq!(dims.mipmap_dimensions(1), Some(ImageDimensions::Dim2d { width: 256, height: 128, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(2), Some(ImageDimensions::Dim2d { width: 128, height: 64, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(3), Some(ImageDimensions::Dim2d { width: 64, height: 32, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(4), Some(ImageDimensions::Dim2d { width: 32, height: 16, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(5), Some(ImageDimensions::Dim2d { width: 16, height: 8, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(6), Some(ImageDimensions::Dim2d { width: 8, height: 4, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(7), Some(ImageDimensions::Dim2d { width: 4, height: 2, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(8), Some(ImageDimensions::Dim2d { width: 2, height: 1, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(9), Some(ImageDimensions::Dim2d { width: 1, height: 1, cubemap_compatible: false, array_layers: 1 }));
assert_eq!(dims.mipmap_dimensions(1),
Some(ImageDimensions::Dim2d {
width: 256,
height: 128,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(2),
Some(ImageDimensions::Dim2d {
width: 128,
height: 64,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(3),
Some(ImageDimensions::Dim2d {
width: 64,
height: 32,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(4),
Some(ImageDimensions::Dim2d {
width: 32,
height: 16,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(5),
Some(ImageDimensions::Dim2d {
width: 16,
height: 8,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(6),
Some(ImageDimensions::Dim2d {
width: 8,
height: 4,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(7),
Some(ImageDimensions::Dim2d {
width: 4,
height: 2,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(8),
Some(ImageDimensions::Dim2d {
width: 2,
height: 1,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(9),
Some(ImageDimensions::Dim2d {
width: 1,
height: 1,
cubemap_compatible: false,
array_layers: 1,
}));
assert_eq!(dims.mipmap_dimensions(10), None);
}
}

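The tests above pin down the mipmapping rule: dimensions are rounded up to the next power of two before halving (283x175 yields 256x128 at level 1), and levels count down to 1x1 inclusive. A hedged sketch of the resulting level count, not the crate's actual implementation:

// 512x512 -> 10 levels; 2x3 -> 3 levels (3 rounds up to 4: 4, 2, 1).
fn max_mipmaps(width: u32, height: u32) -> u32 {
    let largest = width.max(height).next_power_of_two();
    32 - largest.leading_zeros()
}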

@ -30,8 +30,8 @@ use image::traits::ImageContent;
use image::traits::ImageViewAccess;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::pool::AllocLayout;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;


@ -29,8 +29,8 @@ use image::ImageUsage;
use image::MipmapsCount;
use image::ViewType;
use memory::DeviceMemory;
use memory::MemoryRequirements;
use memory::DeviceMemoryAllocError;
use memory::MemoryRequirements;
use sync::Sharing;
use Error;
@ -517,7 +517,9 @@ impl UnsafeImage {
let mut output = vk::MemoryRequirements2KHR {
sType: vk::STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
pNext: output2.as_mut().map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
pNext: output2
.as_mut()
.map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
.unwrap_or(ptr::null_mut()) as *mut _,
memoryRequirements: mem::uninitialized(),
};


@ -11,9 +11,9 @@ use std::collections::HashSet;
use std::error;
use std::ffi::{CStr, CString};
use std::fmt;
use std::iter::FromIterator;
use std::ptr;
use std::str;
use std::iter::FromIterator;
use Error;
use OomError;


@ -94,8 +94,10 @@ pub struct Instance {
}
// TODO: fix the underlying cause instead
impl ::std::panic::UnwindSafe for Instance {}
impl ::std::panic::RefUnwindSafe for Instance {}
impl ::std::panic::UnwindSafe for Instance {
}
impl ::std::panic::RefUnwindSafe for Instance {
}
impl Instance {
/// Initializes a new instance of Vulkan.
@ -133,14 +135,16 @@ impl Instance {
.map(|&layer| CString::new(layer).unwrap())
.collect::<SmallVec<[_; 16]>>();
Instance::new_inner(app_infos, extensions.into(), layers,
Instance::new_inner(app_infos,
extensions.into(),
layers,
OwnedOrRef::Ref(loader::auto_loader()?))
}
/// Same as `new`, but allows specifying a loader where to load Vulkan from.
pub fn with_loader<'a, L, Ext>(loader: FunctionPointers<Box<Loader + Send + Sync>>,
app_infos: Option<&ApplicationInfo>, extensions: Ext,
layers: L) -> Result<Arc<Instance>, InstanceCreationError>
app_infos: Option<&ApplicationInfo>, extensions: Ext, layers: L)
-> Result<Arc<Instance>, InstanceCreationError>
where L: IntoIterator<Item = &'a &'a str>,
Ext: Into<RawInstanceExtensions>
{
@ -149,7 +153,9 @@ impl Instance {
.map(|&layer| CString::new(layer).unwrap())
.collect::<SmallVec<[_; 16]>>();
Instance::new_inner(app_infos, extensions.into(), layers,
Instance::new_inner(app_infos,
extensions.into(),
layers,
OwnedOrRef::Owned(loader))
}


@ -21,6 +21,7 @@
//! By default vulkano will use the `auto_loader()` function, which tries to automatically load
//! a Vulkan implementation from the system.
use shared_library;
use std::error;
use std::fmt;
use std::mem;
@ -28,7 +29,6 @@ use std::ops::Deref;
use std::os::raw::c_char;
use std::os::raw::c_void;
use std::path::Path;
use shared_library;
use SafeDeref;
use vk;
@ -75,9 +75,12 @@ impl DynamicLibraryLoader {
.map_err(LoadingError::LibraryLoadFailure)?;
let get_proc_addr = {
let ptr: *mut c_void = vk_lib
let ptr: *mut c_void =
vk_lib
.symbol("vkGetInstanceProcAddr")
.map_err(|_| LoadingError::MissingEntryPoint("vkGetInstanceProcAddr".to_owned()))?;
.map_err(|_| {
LoadingError::MissingEntryPoint("vkGetInstanceProcAddr".to_owned())
})?;
mem::transmute(ptr)
};
@ -107,11 +110,7 @@ impl<L> FunctionPointers<L> {
pub fn new(loader: L) -> FunctionPointers<L>
where L: Loader
{
let entry_points = vk::EntryPoints::load(|name| {
unsafe {
mem::transmute(loader.get_instance_proc_addr(0, name.as_ptr()))
}
});
let entry_points = vk::EntryPoints::load(|name| unsafe { mem::transmute(loader.get_instance_proc_addr(0, name.as_ptr())) });
FunctionPointers {
loader,
@ -169,7 +168,8 @@ macro_rules! statically_linked_vulkan_loader {
/// This function tries to auto-guess where to find the Vulkan implementation, and loads it in a
/// `lazy_static!`. The content of the lazy_static is then returned, or an error if we failed to
/// load Vulkan.
pub fn auto_loader()
pub fn auto_loader(
)
-> Result<&'static FunctionPointers<Box<Loader + Send + Sync>>, LoadingError>
{
#[cfg(any(target_os = "macos", target_os = "ios"))]
@ -194,9 +194,7 @@ pub fn auto_loader()
Path::new("libvulkan.so")
}
let loader = unsafe {
DynamicLibraryLoader::new(get_path())?
};
let loader = unsafe { DynamicLibraryLoader::new(get_path())? };
Ok(Box::new(loader))
}
@ -209,7 +207,7 @@ pub fn auto_loader()
match DEFAULT_LOADER.deref() {
&Ok(ref ptr) => Ok(ptr),
&Err(ref err) => Err(err.clone())
&Err(ref err) => Err(err.clone()),
}
}
@ -263,7 +261,7 @@ mod tests {
unsafe {
match DynamicLibraryLoader::new("_non_existing_library.void") {
Err(LoadingError::LibraryLoadFailure(_)) => (),
_ => panic!()
_ => panic!(),
}
}
}

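A hedged sketch of driving the loader API touched above, using the same library name the module defaults to on Linux (`DynamicLibraryLoader::new` is unsafe because the file must really be a Vulkan implementation):

// Load an explicit shared library instead of relying on auto_loader().
let loader = unsafe { DynamicLibraryLoader::new("libvulkan.so")? };
let fns = FunctionPointers::new(loader);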

@ -7,9 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::fmt;
use std::mem;
use std::error;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
@ -17,8 +17,8 @@ use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;
use OomError;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use device::Device;
@ -93,7 +93,7 @@ impl DeviceMemory {
let physical_device = device.physical_device();
let mut allocation_count = device.allocation_count().lock().expect("Poisoned mutex");
if *allocation_count >= physical_device.limits().max_memory_allocation_count() {
return Err(DeviceMemoryAllocError::TooManyObjects)
return Err(DeviceMemoryAllocError::TooManyObjects);
}
let vk = device.pointers();
@ -118,7 +118,7 @@ impl DeviceMemory {
},
DedicatedAlloc::None => {
None
}
},
}
} else {
None
@ -126,7 +126,10 @@ impl DeviceMemory {
let infos = vk::MemoryAllocateInfo {
sType: vk::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
pNext: dedicated_alloc_info.as_ref().map(|i| i as *const vk::MemoryDedicatedAllocateInfoKHR).unwrap_or(ptr::null()) as *const _,
pNext: dedicated_alloc_info
.as_ref()
.map(|i| i as *const vk::MemoryDedicatedAllocateInfoKHR)
.unwrap_or(ptr::null()) as *const _,
allocationSize: size as u64,
memoryTypeIndex: memory_type.id(),
};
@ -238,7 +241,10 @@ impl Drop for DeviceMemory {
unsafe {
let vk = self.device.pointers();
vk.FreeMemory(self.device.internal_object(), self.memory, ptr::null());
let mut allocation_count = self.device.allocation_count().lock().expect("Poisoned mutex");
let mut allocation_count = self.device
.allocation_count()
.lock()
.expect("Poisoned mutex");
*allocation_count -= 1;
}
}
@ -467,7 +473,8 @@ impl error::Error for DeviceMemoryAllocError {
fn description(&self) -> &str {
match *self {
DeviceMemoryAllocError::OomError(_) => "not enough memory available",
DeviceMemoryAllocError::TooManyObjects => "the maximum number of allocations has been exceeded",
DeviceMemoryAllocError::TooManyObjects =>
"the maximum number of allocations has been exceeded",
DeviceMemoryAllocError::MemoryMapFailed => "memory map failed",
}
}


@ -95,8 +95,8 @@ use vk;
pub use self::device_memory::CpuAccess;
pub use self::device_memory::DeviceMemory;
pub use self::device_memory::MappedDeviceMemory;
pub use self::device_memory::DeviceMemoryAllocError;
pub use self::device_memory::MappedDeviceMemory;
pub use self::pool::MemoryPool;
mod device_memory;


@ -16,8 +16,8 @@ use device::Device;
use instance::Instance;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use memory::DeviceMemoryAllocError;
use memory::MappedDeviceMemory;
/// Memory pool that operates on a given memory type.
#[derive(Debug)]


@ -11,9 +11,9 @@ use device::DeviceOwned;
use instance::MemoryType;
use memory::DedicatedAlloc;
use memory::DeviceMemory;
use memory::DeviceMemoryAllocError;
use memory::MappedDeviceMemory;
use memory::MemoryRequirements;
use memory::DeviceMemoryAllocError;
pub use self::host_visible::StdHostVisibleMemoryTypePool;
pub use self::host_visible::StdHostVisibleMemoryTypePoolAlloc;
@ -53,7 +53,8 @@ pub unsafe trait MemoryPool: DeviceOwned {
/// - Panics if `alignment` is 0.
///
fn alloc_generic(&self, ty: MemoryType, size: usize, alignment: usize, layout: AllocLayout,
map: MappingRequirement) -> Result<Self::Alloc, DeviceMemoryAllocError>;
map: MappingRequirement)
-> Result<Self::Alloc, DeviceMemoryAllocError>;
/// Chooses a memory type and allocates memory from it.
///
@ -84,8 +85,9 @@ pub unsafe trait MemoryPool: DeviceOwned {
/// - Panics if `size` is 0.
/// - Panics if `alignment` is 0.
///
fn alloc_from_requirements<F>(&self, requirements: &MemoryRequirements, layout: AllocLayout,
map: MappingRequirement, dedicated: DedicatedAlloc, mut filter: F)
fn alloc_from_requirements<F>(
&self, requirements: &MemoryRequirements, layout: AllocLayout, map: MappingRequirement,
dedicated: DedicatedAlloc, mut filter: F)
-> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
where F: FnMut(MemoryType) -> AllocFromRequirementsFilter
{
@ -110,33 +112,44 @@ pub unsafe trait MemoryPool: DeviceOwned {
.filter(|&(t, _)| (requirements.memory_type_bits & (1 << t.id())) != 0)
.filter(|&(t, rq)| filter(t) == rq)
.next()
.expect("Couldn't find a memory type to allocate from").0
.expect("Couldn't find a memory type to allocate from")
.0
};
// Redirect to `self.alloc_generic` if we don't perform a dedicated allocation.
if !requirements.prefer_dedicated ||
!self.device().loaded_extensions().khr_dedicated_allocation
{
let alloc = self.alloc_generic(mem_ty, requirements.size, requirements.alignment,
layout, map)?;
let alloc = self.alloc_generic(mem_ty,
requirements.size,
requirements.alignment,
layout,
map)?;
return Ok(alloc.into());
}
if let DedicatedAlloc::None = dedicated {
let alloc = self.alloc_generic(mem_ty, requirements.size, requirements.alignment,
layout, map)?;
let alloc = self.alloc_generic(mem_ty,
requirements.size,
requirements.alignment,
layout,
map)?;
return Ok(alloc.into());
}
// If we reach here, then we perform a dedicated alloc.
match map {
MappingRequirement::Map => {
let mem = DeviceMemory::dedicated_alloc_and_map(self.device().clone(), mem_ty,
requirements.size, dedicated)?;
let mem = DeviceMemory::dedicated_alloc_and_map(self.device().clone(),
mem_ty,
requirements.size,
dedicated)?;
Ok(PotentialDedicatedAllocation::DedicatedMapped(mem))
},
MappingRequirement::DoNotMap => {
let mem = DeviceMemory::dedicated_alloc(self.device().clone(), mem_ty,
requirements.size, dedicated)?;
let mem = DeviceMemory::dedicated_alloc(self.device().clone(),
mem_ty,
requirements.size,
dedicated)?;
Ok(PotentialDedicatedAllocation::Dedicated(mem))
},
}

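The branching reformatted above reduces to a three-way condition; a hedged restatement as a tiny helper (illustrative names, not the crate's API):

// Dedicated allocation is used only when the resource prefers it, the
// VK_KHR_dedicated_allocation extension is loaded, and a resource is attached;
// in every other case the pool falls back to alloc_generic.
fn use_dedicated(prefer_dedicated: bool, ext_loaded: bool, has_resource: bool) -> bool {
    prefer_dedicated && ext_loaded && has_resource
}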

@ -18,8 +18,8 @@ use device::Device;
use device::DeviceOwned;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use memory::DeviceMemoryAllocError;
use memory::MappedDeviceMemory;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
@ -34,7 +34,8 @@ pub struct StdMemoryPool {
device: Arc<Device>,
// For each memory type index, stores the associated pool.
pools: Mutex<HashMap<(u32, AllocLayout, MappingRequirement), Pool, BuildHasherDefault<FnvHasher>>>,
pools:
Mutex<HashMap<(u32, AllocLayout, MappingRequirement), Pool, BuildHasherDefault<FnvHasher>>>,
}
impl StdMemoryPool {
@ -86,8 +87,7 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
Entry::Vacant(entry) => {
if memory_type_host_visible {
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(),
memory_type);
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
entry.insert(Pool::HostVisible(pool.clone()));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);


@ -76,8 +76,8 @@ impl<Pl> ComputePipeline<Pl> {
///
/// An error will be returned if the pipeline layout isn't a superset of what the shader
/// uses.
pub fn with_pipeline_layout<Cs>(
device: Arc<Device>, shader: &Cs, specialization: &Cs::SpecializationConstants,
pub fn with_pipeline_layout<Cs>(device: Arc<Device>, shader: &Cs,
specialization: &Cs::SpecializationConstants,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
where Cs::PipelineLayout: Clone,
@ -366,24 +366,24 @@ impl From<Error> for ComputePipelineCreationError {
#[cfg(test)]
mod tests {
use std::ffi::CStr;
use std::sync::Arc;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use command_buffer::AutoCommandBufferBuilder;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::PersistentDescriptorSet;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use pipeline::ComputePipeline;
use pipeline::shader::ShaderModule;
use pipeline::shader::SpecializationConstants;
use pipeline::shader::SpecializationMapEntry;
use pipeline::ComputePipeline;
use sync::now;
use std::ffi::CStr;
use std::sync::Arc;
use sync::GpuFuture;
use sync::now;
// TODO: test for basic creation
// TODO: test for pipeline layout error
@ -412,33 +412,488 @@ mod tests {
write.write = VALUE;
}
*/
const MODULE: [u8; 480] = [3, 2, 35, 7, 0, 0, 1, 0, 1, 0, 8, 0, 14, 0, 0, 0, 0, 0, 0,
0, 17, 0, 2, 0, 1, 0, 0, 0, 11, 0, 6, 0, 1, 0, 0, 0, 71, 76,
83, 76, 46, 115, 116, 100, 46, 52, 53, 48, 0, 0, 0, 0, 14,
0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 15, 0, 5, 0, 5, 0, 0, 0, 4,
0, 0, 0, 109, 97, 105, 110, 0, 0, 0, 0, 16, 0, 6, 0, 4, 0,
0, 0, 17, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0,
3, 0, 2, 0, 0, 0, 194, 1, 0, 0, 5, 0, 4, 0, 4, 0, 0, 0, 109,
97, 105, 110, 0, 0, 0, 0, 5, 0, 4, 0, 7, 0, 0, 0, 79, 117,
116, 112, 117, 116, 0, 0, 6, 0, 5, 0, 7, 0, 0, 0, 0, 0, 0,
0, 119, 114, 105, 116, 101, 0, 0, 0, 5, 0, 4, 0, 9, 0, 0,
0, 119, 114, 105, 116, 101, 0, 0, 0, 5, 0, 4, 0, 11, 0, 0,
0, 86, 65, 76, 85, 69, 0, 0, 0, 72, 0, 5, 0, 7, 0, 0, 0, 0,
0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 0, 71, 0, 3, 0, 7, 0, 0, 0,
3, 0, 0, 0, 71, 0, 4, 0, 9, 0, 0, 0, 34, 0, 0, 0, 0, 0, 0,
0, 71, 0, 4, 0, 9, 0, 0, 0, 33, 0, 0, 0, 0, 0, 0, 0, 71, 0,
4, 0, 11, 0, 0, 0, 1, 0, 0, 0, 83, 0, 0, 0, 19, 0, 2, 0, 2,
0, 0, 0, 33, 0, 3, 0, 3, 0, 0, 0, 2, 0, 0, 0, 21, 0, 4, 0,
6, 0, 0, 0, 32, 0, 0, 0, 1, 0, 0, 0, 30, 0, 3, 0, 7, 0, 0,
0, 6, 0, 0, 0, 32, 0, 4, 0, 8, 0, 0, 0, 2, 0, 0, 0, 7, 0, 0,
0, 59, 0, 4, 0, 8, 0, 0, 0, 9, 0, 0, 0, 2, 0, 0, 0, 43, 0,
4, 0, 6, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 50, 0, 4, 0, 6,
0, 0, 0, 11, 0, 0, 0, 239, 190, 173, 222, 32, 0, 4, 0, 12,
0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0, 54, 0, 5, 0, 2, 0, 0, 0, 4,
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 248, 0, 2, 0, 5, 0, 0, 0,
65, 0, 5, 0, 12, 0, 0, 0, 13, 0, 0, 0, 9, 0, 0, 0, 10, 0, 0,
0, 62, 0, 3, 0, 13, 0, 0, 0, 11, 0, 0, 0, 253, 0, 1, 0, 56,
0, 1, 0];
const MODULE: [u8; 480] = [
3,
2,
35,
7,
0,
0,
1,
0,
1,
0,
8,
0,
14,
0,
0,
0,
0,
0,
0,
0,
17,
0,
2,
0,
1,
0,
0,
0,
11,
0,
6,
0,
1,
0,
0,
0,
71,
76,
83,
76,
46,
115,
116,
100,
46,
52,
53,
48,
0,
0,
0,
0,
14,
0,
3,
0,
0,
0,
0,
0,
1,
0,
0,
0,
15,
0,
5,
0,
5,
0,
0,
0,
4,
0,
0,
0,
109,
97,
105,
110,
0,
0,
0,
0,
16,
0,
6,
0,
4,
0,
0,
0,
17,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
3,
0,
3,
0,
2,
0,
0,
0,
194,
1,
0,
0,
5,
0,
4,
0,
4,
0,
0,
0,
109,
97,
105,
110,
0,
0,
0,
0,
5,
0,
4,
0,
7,
0,
0,
0,
79,
117,
116,
112,
117,
116,
0,
0,
6,
0,
5,
0,
7,
0,
0,
0,
0,
0,
0,
0,
119,
114,
105,
116,
101,
0,
0,
0,
5,
0,
4,
0,
9,
0,
0,
0,
119,
114,
105,
116,
101,
0,
0,
0,
5,
0,
4,
0,
11,
0,
0,
0,
86,
65,
76,
85,
69,
0,
0,
0,
72,
0,
5,
0,
7,
0,
0,
0,
0,
0,
0,
0,
35,
0,
0,
0,
0,
0,
0,
0,
71,
0,
3,
0,
7,
0,
0,
0,
3,
0,
0,
0,
71,
0,
4,
0,
9,
0,
0,
0,
34,
0,
0,
0,
0,
0,
0,
0,
71,
0,
4,
0,
9,
0,
0,
0,
33,
0,
0,
0,
0,
0,
0,
0,
71,
0,
4,
0,
11,
0,
0,
0,
1,
0,
0,
0,
83,
0,
0,
0,
19,
0,
2,
0,
2,
0,
0,
0,
33,
0,
3,
0,
3,
0,
0,
0,
2,
0,
0,
0,
21,
0,
4,
0,
6,
0,
0,
0,
32,
0,
0,
0,
1,
0,
0,
0,
30,
0,
3,
0,
7,
0,
0,
0,
6,
0,
0,
0,
32,
0,
4,
0,
8,
0,
0,
0,
2,
0,
0,
0,
7,
0,
0,
0,
59,
0,
4,
0,
8,
0,
0,
0,
9,
0,
0,
0,
2,
0,
0,
0,
43,
0,
4,
0,
6,
0,
0,
0,
10,
0,
0,
0,
0,
0,
0,
0,
50,
0,
4,
0,
6,
0,
0,
0,
11,
0,
0,
0,
239,
190,
173,
222,
32,
0,
4,
0,
12,
0,
0,
0,
2,
0,
0,
0,
6,
0,
0,
0,
54,
0,
5,
0,
2,
0,
0,
0,
4,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
248,
0,
2,
0,
5,
0,
0,
0,
65,
0,
5,
0,
12,
0,
0,
0,
13,
0,
0,
0,
9,
0,
0,
0,
10,
0,
0,
0,
62,
0,
3,
0,
13,
0,
0,
0,
11,
0,
0,
0,
253,
0,
1,
0,
56,
0,
1,
0,
];
ShaderModule::new(device.clone(), &MODULE).unwrap()
};
@ -446,11 +901,13 @@ mod tests {
#[derive(Debug, Copy, Clone)]
struct Layout;
unsafe impl PipelineLayoutDesc for Layout {
fn num_sets(&self) -> usize { 1 }
fn num_sets(&self) -> usize {
1
}
fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
match set {
0 => Some(1),
_ => None
_ => None,
}
}
fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
@ -461,13 +918,18 @@ mod tests {
storage: true,
}),
array_count: 1,
stages: ShaderStages { compute: true, .. ShaderStages::none() },
stages: ShaderStages {
compute: true,
..ShaderStages::none()
},
readonly: true,
}),
_ => None
_ => None,
}
}
fn num_push_constants_ranges(&self) -> usize { 0 }
fn num_push_constants_ranges(&self) -> usize {
0
}
fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
None
}
@ -480,7 +942,9 @@ mod tests {
#[derive(Debug, Copy, Clone)]
#[allow(non_snake_case)]
#[repr(C)]
struct SpecConsts { VALUE: i32 }
struct SpecConsts {
VALUE: i32,
}
unsafe impl SpecializationConstants for SpecConsts {
fn descriptors() -> &'static [SpecializationMapEntry] {
static DESCRIPTORS: [SpecializationMapEntry; 1] = [
@ -488,29 +952,38 @@ mod tests {
constant_id: 83,
offset: 0,
size: 4,
}
},
];
&DESCRIPTORS
}
}
let pipeline = Arc::new(ComputePipeline::new(device.clone(), &shader,
&SpecConsts { VALUE: 0x12345678 }).unwrap());
let pipeline = Arc::new(ComputePipeline::new(device.clone(),
&shader,
&SpecConsts { VALUE: 0x12345678 })
.unwrap());
let data_buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
0).unwrap();
let data_buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0)
.unwrap();
let set = PersistentDescriptorSet::start(pipeline.clone(), 0)
.add_buffer(data_buffer.clone()).unwrap()
.build().unwrap();
.add_buffer(data_buffer.clone())
.unwrap()
.build()
.unwrap();
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(device.clone(),
queue.family()).unwrap()
.dispatch([1, 1, 1], pipeline, set, ()).unwrap()
.build().unwrap();
queue.family())
.unwrap()
.dispatch([1, 1, 1], pipeline, set, ())
.unwrap()
.build()
.unwrap();
let future = now(device.clone())
.then_execute(queue.clone(), command_buffer).unwrap()
.then_signal_fence_and_flush().unwrap();
.then_execute(queue.clone(), command_buffer)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
future.wait(None).unwrap();
let data_buffer_content = data_buffer.read().unwrap();


@ -84,11 +84,16 @@ impl GraphicsPipeline<(), (), ()> {
/// fill with the various parameters.
pub fn start<'a>()
-> GraphicsPipelineBuilder<SingleBufferDefinition<()>,
EmptyEntryPointDummy, (),
EmptyEntryPointDummy, (),
EmptyEntryPointDummy, (),
EmptyEntryPointDummy, (),
EmptyEntryPointDummy, (),
EmptyEntryPointDummy,
(),
EmptyEntryPointDummy,
(),
EmptyEntryPointDummy,
(),
EmptyEntryPointDummy,
(),
EmptyEntryPointDummy,
(),
()>
{
GraphicsPipelineBuilder::new()

View File

@ -100,10 +100,10 @@ impl ShaderModule {
/// - The input, output and layout must correctly describe the input, output and layout used
/// by this stage.
///
pub unsafe fn graphics_entry_point<'a, S, I, O, L>(&'a self, name: &'a CStr, input: I, output: O,
layout: L, ty: GraphicsShaderType)
-> GraphicsEntryPoint<'a, S, I, O, L>
{
pub unsafe fn graphics_entry_point<'a, S, I, O, L>(&'a self, name: &'a CStr, input: I,
output: O, layout: L,
ty: GraphicsShaderType)
-> GraphicsEntryPoint<'a, S, I, O, L> {
GraphicsEntryPoint {
module: self,
name: name,
@ -189,7 +189,7 @@ unsafe impl<'a, S, I, O, L> EntryPointAbstract for GraphicsEntryPoint<'a, S, I,
where L: PipelineLayoutDesc,
I: ShaderInterfaceDef,
O: ShaderInterfaceDef,
S: SpecializationConstants,
S: SpecializationConstants
{
type PipelineLayout = L;
type SpecializationConstants = S;
@ -214,7 +214,7 @@ unsafe impl<'a, S, I, O, L> GraphicsEntryPointAbstract for GraphicsEntryPoint<'a
where L: PipelineLayoutDesc,
I: ShaderInterfaceDef,
O: ShaderInterfaceDef,
S: SpecializationConstants,
S: SpecializationConstants
{
type InputDefinition = I;
type OutputDefinition = O;
@ -305,7 +305,7 @@ pub struct ComputeEntryPoint<'a, S, L> {
unsafe impl<'a, S, L> EntryPointAbstract for ComputeEntryPoint<'a, S, L>
where L: PipelineLayoutDesc,
S: SpecializationConstants,
S: SpecializationConstants
{
type PipelineLayout = L;
type SpecializationConstants = S;


@ -102,8 +102,10 @@ unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for TwoBuff
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
// FIXME: safety
assert_eq!(source.len(), 2);
let vertices = [source[0].size() / mem::size_of::<T>(), source[1].size() / mem::size_of::<U>()]
.iter()
let vertices = [
source[0].size() / mem::size_of::<T>(),
source[1].size() / mem::size_of::<U>(),
].iter()
.cloned()
.min()
.unwrap();


@ -85,10 +85,7 @@ impl UnsafeQueryPool {
#[inline]
pub fn query(&self, index: u32) -> Option<UnsafeQuery> {
if index < self.num_slots() {
Some(UnsafeQuery {
pool: self,
index,
})
Some(UnsafeQuery { pool: self, index })
} else {
None
}

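The compacted `UnsafeQuery` construction above is bounds-checked; a hedged usage sketch:

// Out-of-range indices yield None instead of panicking.
if let Some(query) = pool.query(0) {
    // `query` borrows the pool and refers to a valid slot here.
}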

@ -296,8 +296,8 @@ pub use self::present_region::RectangleLayer;
pub use self::surface::CapabilitiesError;
pub use self::surface::Surface;
pub use self::surface::SurfaceCreationError;
pub use self::swapchain::AcquiredImage;
pub use self::swapchain::AcquireError;
pub use self::swapchain::AcquiredImage;
pub use self::swapchain::PresentFuture;
pub use self::swapchain::Swapchain;
pub use self::swapchain::SwapchainAcquireFuture;


@ -6,6 +6,7 @@
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use swapchain::Swapchain;
use vk;
@ -20,7 +21,9 @@ pub struct PresentRegion {
impl PresentRegion {
/// Returns true if this present region is compatible with the swapchain.
pub fn is_compatible_with(&self, swapchain: &Swapchain) -> bool {
self.rectangles.iter().all(|rect| rect.is_compatible_with(swapchain))
self.rectangles
.iter()
.all(|rect| rect.is_compatible_with(swapchain))
}
}


@ -131,8 +131,8 @@ pub fn present<F>(swapchain: Arc<Swapchain>, before: F, queue: Arc<Queue>, index
/// This is just an optimization hint, as the Vulkan driver is free to ignore the given present region.
///
/// If `VK_KHR_incremental_present` is not enabled on the device, the parameter will be ignored.
pub fn present_incremental<F>(swapchain: Arc<Swapchain>, before: F, queue: Arc<Queue>, index: usize,
present_region: PresentRegion)
pub fn present_incremental<F>(swapchain: Arc<Swapchain>, before: F, queue: Arc<Queue>,
index: usize, present_region: PresentRegion)
-> PresentFuture<F>
where F: GpuFuture
{
@ -809,14 +809,14 @@ unsafe impl GpuFuture for SwapchainAcquireFuture {
}
#[inline]
fn check_buffer_access(&self, _: &BufferAccess, _: bool, _: &Queue)
fn check_buffer_access(
&self, _: &BufferAccess, _: bool, _: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
Err(AccessCheckError::Unknown)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, _: bool,
_: &Queue)
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, _: bool, _: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
let swapchain_image = self.swapchain.raw_image(self.image_id).unwrap();
if swapchain_image.image.internal_object() != image.inner().image.internal_object() {
@ -864,7 +864,10 @@ impl Drop for SwapchainAcquireFuture {
// we know for sure that it must've been signalled).
debug_assert!({
let dur = Some(Duration::new(0, 0));
self.fence.as_ref().map(|f| f.wait(dur).is_ok()).unwrap_or(true)
self.fence
.as_ref()
.map(|f| f.wait(dur).is_ok())
.unwrap_or(true)
});
}
@ -999,24 +1002,32 @@ unsafe impl<P> GpuFuture for PresentFuture<P>
Ok(match self.previous.build_submission()? {
SubmitAnyBuilder::Empty => {
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
builder.add_swapchain(&self.swapchain,
self.image_id as u32,
self.present_region.as_ref());
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::SemaphoresWait(sem) => {
let mut builder: SubmitPresentBuilder = sem.into();
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
builder.add_swapchain(&self.swapchain,
self.image_id as u32,
self.present_region.as_ref());
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::CommandBuffer(cb) => {
cb.submit(&queue.unwrap())?; // FIXME: wrong because build_submission can be called multiple times
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
builder.add_swapchain(&self.swapchain,
self.image_id as u32,
self.present_region.as_ref());
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::BindSparse(cb) => {
cb.submit(&queue.unwrap())?; // FIXME: wrong because build_submission can be called multiple times
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
builder.add_swapchain(&self.swapchain,
self.image_id as u32,
self.present_region.as_ref());
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::QueuePresent(present) => {
@ -1139,8 +1150,7 @@ pub struct AcquiredImage {
/// a new one.
pub unsafe fn acquire_next_image_raw(swapchain: &Swapchain, timeout: Option<Duration>,
semaphore: Option<&Semaphore>, fence: Option<&Fence>)
-> Result<AcquiredImage, AcquireError>
{
-> Result<AcquiredImage, AcquireError> {
let vk = swapchain.device.pointers();
let timeout_ns = if let Some(timeout) = timeout {
@ -1153,7 +1163,8 @@ pub unsafe fn acquire_next_image_raw(swapchain: &Swapchain, timeout: Option<Dura
};
let mut out = mem::uninitialized();
let r = check_errors(vk.AcquireNextImageKHR(swapchain.device.internal_object(),
let r =
check_errors(vk.AcquireNextImageKHR(swapchain.device.internal_object(),
swapchain.swapchain,
timeout_ns,
semaphore.map(|s| s.internal_object()).unwrap_or(0),


@ -35,7 +35,6 @@ pub struct Event {
}
impl Event {
/// Takes an event from the vulkano-provided event pool.
/// If the pool is empty, a new event will be allocated.
/// Upon `drop`, the event is put back into the pool.
@ -188,8 +187,8 @@ impl Drop for Event {
#[cfg(test)]
mod tests {
use sync::Event;
use VulkanObject;
use sync::Event;
#[test]
fn event_create() {


@ -383,9 +383,9 @@ impl From<Error> for FenceWaitError {
#[cfg(test)]
mod tests {
use VulkanObject;
use std::time::Duration;
use sync::Fence;
use VulkanObject;
#[test]
fn fence_create() {


@ -127,8 +127,8 @@ impl<D> Drop for Semaphore<D>
#[cfg(test)]
mod tests {
use sync::Semaphore;
use VulkanObject;
use sync::Semaphore;
#[test]
fn semaphore_create() {