Mirror of https://github.com/vulkano-rs/vulkano.git (synced 2024-11-25 00:04:15 +00:00)
Commit 9662f8b092: Run rustfmt on the code (#807)
Parent: 5ac98f53f1

* Run rustfmt on the code
* Fix compilation
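The diff below is purely mechanical reformatting; the commit does not record the exact invocation, but a pass like this is normally produced by running `cargo fmt` (or `rustfmt` per file) with whatever configuration the project uses. The dominant change throughout is how long expressions are wrapped. The sketch below is illustrative only, not code from vulkano; it shows the style rustfmt settles on for a method chain that no longer fits on one line:

    // Illustrative only, not taken from the repository.
    // A chain that fits in the line width stays on one line; once it is too
    // long, rustfmt breaks it with one method call per line, which is the
    // most common kind of change in the hunks below.
    fn count_in_set(descriptors: &[(u32, u32)], set: u32) -> usize {
        descriptors
            .iter()
            .filter(|&&(s, _)| s == set)
            .count()
    }

    fn main() {
        let descriptors = [(0, 1), (0, 3), (1, 0)];
        assert_eq!(count_in_set(&descriptors, 0), 2);
    }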
@@ -124,11 +124,10 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {

In `num_bindings_in_set_body`, the binding is joined with the start of its method chain (previously `let num =` stood alone with `descriptors` on the next line); the rest of the chain keeps one call per line:

    let num_bindings_in_set_body = {
        (0 .. num_sets)
            .map(|set| {
                let num = descriptors
                    .iter()
                    .filter(|d| d.set == set)
                    .fold(0, |s, d| cmp::max(s, 1 + d.binding));
                format!("{set} => Some({num}),", set = set, num = num)
            })
            .collect::<Vec<_>>()
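The fold in that hunk is easy to misread; as a standalone illustration (not the generator code itself), it computes one past the highest binding index used in a descriptor set:

    // Sketch of what the fold above computes: the number of binding slots a
    // set needs is one past the highest binding index it uses.
    fn num_bindings(bindings_in_set: &[u32]) -> u32 {
        bindings_in_set.iter().fold(0, |s, &b| s.max(1 + b))
    }

    fn main() {
        assert_eq!(num_bindings(&[0, 2, 5]), 6); // bindings 0..=5 need 6 slots
        assert_eq!(num_bindings(&[]), 0);
    }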
@@ -62,10 +62,10 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -

In the GLCompute branch of `let (ty, f_call) = { ... }`, the two `format!` string literals (the `::vulkano::pipeline::shader::ComputeEntryPoint<{}, Layout>` type string and the `compute_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), Layout(ShaderStages {{ compute: true, .. ShaderStages::none() }}))` call string) are re-wrapped so their backslash continuations fall at different points; the generated strings are unchanged.

@@ -74,36 +74,50 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -

In the match on the execution model:
- the `"::vulkano::pipeline::shader::GraphicsShaderType::TessellationControl".to_owned()` and `"::vulkano::pipeline::shader::GraphicsShaderType::TessellationEvaluation".to_owned()` expressions get `.to_owned()` moved onto its own line;
- in the Geometry arm, the `if let &parse::Instruction::ExecutionMode { target_id, ref mode, .. } = instruction` pattern is expanded to one field per line;
- the over-long arms `&enums::ExecutionMode::ExecutionModeInputLinesAdjacency => Some("LinesWithAdjacency")` and `&enums::ExecutionMode::ExecutionModeInputTrianglesAdjacency => Some("TrianglesWithAdjacency")` are wrapped after the `=>`;
- the `format!("::vulkano::pipeline::shader::GraphicsShaderType::Geometry( ... )", execution_mode.unwrap())` call is re-indented with the format string and the argument on separate lines, the string itself picking up backslash continuations.

@@ -143,7 +157,9 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -

The `graphics_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout({2}), {1})` format string is unchanged, but its arguments are now listed one per line: `capitalized_ep_name,` / `ty,` / `stage);`. The returned tuple `(t, f)` is unchanged context.
@@ -166,14 +166,8 @@ pub enum Instruction {

The `SpecConstantTrue` and `SpecConstantFalse` variants, previously written with one field per line, are collapsed onto single lines because they fit within the line width:

    SpecConstantTrue { result_type_id: u32, result_id: u32 },
    SpecConstantFalse { result_type_id: u32, result_id: u32 },

The preceding variant (ending in `data: Vec<u32>`) and the `SpecConstant { result_type_id: u32, result_id: u32, ... }` variant that follows keep their multi-line form.

@@ -254,9 +248,9 @@ fn decode_instruction(opcode: u16, operands: &[u32]) -> Result<Instruction, Pars

In the `Instruction::ExecutionMode { ... }` arm, the range expression picks up the project's spaced style: `operands[2..].to_vec()` becomes `operands[2 ..].to_vec()`. The neighbouring arms (`17 => Instruction::Capability(Capability::from_num(operands[0])?)`, `19 => Instruction::TypeVoid { result_id: operands[0] }`, `20 => Instruction::TypeBool { result_id: operands[0] }`) are unchanged context.
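The collapsing above follows a simple rule: a struct-like enum variant is written on one line when it fits, and keeps the multi-line layout otherwise. A minimal self-contained sketch of that rule, using only the variants visible in the hunk (the real `Instruction` enum has many more):

    // Small struct variants that fit the line width stay on one line after
    // formatting; larger ones keep one field per line.
    #[allow(dead_code)]
    pub enum Instruction {
        SpecConstantTrue { result_type_id: u32, result_id: u32 },
        SpecConstantFalse { result_type_id: u32, result_id: u32 },
        SpecConstant {
            result_type_id: u32,
            result_id: u32,
            data: Vec<u32>,
        },
    }

    fn main() {
        let _ = Instruction::SpecConstant {
            result_type_id: 1,
            result_id: 2,
            data: vec![0],
        };
    }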
@@ -43,19 +43,39 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {

In the `for instruction in doc.instructions.iter()` loop, the match arms that bind `(type_id, result_id, default_value)` are expanded: the patterns `&parse::Instruction::SpecConstantTrue { result_type_id, result_id }`, `SpecConstantFalse { ... }`, `SpecConstant { result_type_id, result_id, ref data }` and `SpecConstantComposite { ... }` now list one field per line, and the data-joining chain is split one call per line:

    let data = data.iter()
        .map(|d| d.to_string() + "u32")
        .collect::<Vec<_>>()
        .join(", ");
    let def_val = format!("unsafe {{ ::std::mem::transmute([{}]) }}", data);
    (result_type_id, result_id, def_val)

@@ -65,37 +85,47 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {

The `constant_id` lookup, previously one long `doc.instructions.iter().filter_map(|i| { match i { ... } }).next().expect(...)` expression, is re-wrapped with the chain split across lines and the `&parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationSpecId, ref params } if target_id == result_id` arm expanded one field per line, ending in `.next().expect("Found a specialization constant with no SpecId decoration")`. The `spec_consts.push(SpecConst { name: ::name_from_id(doc, result_id), constant_id, rust_ty, rust_size, rust_alignment, default_value })` call only changes indentation. In the `map_entries` loop, the `map_entries.push(format!("SpecializationMapEntry {{ ... }}", c.constant_id, curr_offset, c.rust_size))` call is re-wrapped with each argument on its own line, which breaks the embedded string literal with backslash continuations.

@@ -104,7 +134,8 @@ pub fn write_specialization_constants(doc: &parse::Spirv) -> String {

The final `format!(r#" ... "#, ...)` gets its raw-string opener moved onto its own line (`format!(` followed by `r#"`).

@@ -131,10 +162,16 @@ unsafe impl SpecConstsTrait for SpecializationConstants {{

The `struct_def` and `def_vals` named arguments are reformatted from two packed lines each into one-call-per-line chains over `spec_consts.iter().map(...).collect::<Vec<_>>().join(", ")`; `num_map_entries = map_entries.len()` and `map_entries = map_entries.join(", ")` are unchanged.

@@ -147,7 +184,7 @@ fn spec_const_type_from_id(doc: &parse::Spirv, searched: u32) -> (String, Option

The catch-all arm gains a trailing comma: `_ => ()` becomes `_ => (),`.
@@ -195,33 +195,31 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,

Two large `format!` calls are re-wrapped without changing the generated code:
- the `impl Clone for {name}` template (emitted only when `current_rust_offset.is_some()`, i.e. when the struct has no unsized member) moves back onto the `let i = format!(` line, its backslash continuation falling at a different point; the `copies = rust_members.iter().map(Member::copy_text).collect::<Vec<_>>().join(",\n")` argument keeps one call per line;
- the `#[repr(C)] {derive_text} #[allow(non_snake_case)] pub struct {name} {{ {members} }} /* total_size: {t:?} */ {impl_text}` template is collapsed from one backslash-continued line per attribute into a single literal with embedded `\n` escapes, bound via `let s =` on its own line; the named arguments (`name`, `members`, `t`, `impl_text`, `derive_text`) change only in indentation.

The returned `(s, spirv_req_total_size.map(|sz| sz as usize) ...)` tuple at the end of the hunk is unchanged context.
@@ -25,12 +25,12 @@ use std::mem;

The `use std::sync::atomic::AtomicUsize;` and `use std::sync::atomic::Ordering;` imports move below `use std::sync::RwLockWriteGuard;`, putting the `std::sync` imports into the order rustfmt sorts them (capitalized item names before the lowercase `atomic` module path).

@@ -117,8 +117,7 @@ impl<T> CpuAccessibleBuffer<T> {

In `uninitialized`, the `-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>` return type now ends with the opening `{` on the same line instead of a standalone brace line.

@@ -127,14 +126,13 @@ impl<T> CpuAccessibleBuffer<[T]> {

In `from_iter`, the return-type line is re-indented, the where-clause bound `T: Content + 'static` drops its trailing comma, and the call is pulled together:

    let uninitialized =
        CpuAccessibleBuffer::uninitialized_array(device, data.len(), usage)?;

@@ -154,9 +152,9 @@ impl<T> CpuAccessibleBuffer<[T]> {

`uninitialized_array` is rewritten with its parameters on a continuation line: `pub unsafe fn uninitialized_array(` / `device: Arc<Device>, len: usize, usage: BufferUsage)` / `-> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError> {`.

@@ -169,7 +167,8 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {

In `raw`, the `queue_families: I` parameter and the `-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>` return type are split onto separate lines instead of sharing one.

@@ -206,8 +205,8 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {

The `access: RwLock::new(CurrentGpuAccess::NonExclusive { num: AtomicUsize::new(0) })` field is re-indented and `AtomicUsize::new(0)` gains a trailing comma.

@@ -233,7 +232,7 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>

The where-clause bound `A: MemoryPoolAlloc,` drops its trailing comma.

@@ -569,8 +568,6 @@ mod tests {

The test call is collapsed onto one line: `let _ = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), EMPTY.iter());`.
@@ -29,14 +29,14 @@ use device::Device;

`use memory::DeviceMemoryAllocError;` moves up before the `memory::pool::*` imports, and `use memory::pool::AllocLayout;` moves after `AllocFromRequirementsFilter`, restoring sort order.

@@ -286,9 +286,7 @@ impl<T, A> CpuBufferPool<T, A>

In `next`, the struct literal is collapsed onto one line: `CpuBufferPoolSubbuffer { chunk: self.chunk(iter::once(data)) }`.

@@ -317,10 +315,11 @@ impl<T, A> CpuBufferPool<T, A>

The capacity computation is re-wrapped with the multiplication operator pushed to the end of the line:

    let next_capacity = cmp::max(data.len(), 1) *
        match *mutex {
            Some(ref b) => b.capacity * 2,
            None => 3,
        };

@@ -339,9 +338,9 @@ impl<T, A> CpuBufferPool<T, A>

`try_next` becomes a straight chain, `self.try_next_impl(&mut mutex, iter::once(data)).map(|c| CpuBufferPoolSubbuffer { chunk: c }).ok()`, split one call per line.

@@ -354,7 +353,8 @@ impl<T, A> CpuBufferPool<T, A>

The over-long arm `None => return Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)),` in the `size_bytes` match is wrapped after the `=>`.

@@ -379,14 +379,13 @@ impl<T, A> CpuBufferPool<T, A>

`**cur_buf_mutex = Some(Arc::new(ActualBuffer { inner: buffer, memory: mem, chunks_in_use: Mutex::new(vec![]), next_index: AtomicUsize::new(0), capacity: capacity }));` keeps its fields but is re-indented so the struct literal hangs from the assignment instead of from a standalone `Some(Arc::new(` line.

@@ -403,7 +402,8 @@ impl<T, A> CpuBufferPool<T, A>

In `try_next_impl`, the `mut data: I` parameter and the `-> Result<CpuBufferPoolChunk<T, A>, I>` return type are placed on separate lines.

@@ -424,13 +424,13 @@ impl<T, A> CpuBufferPool<T, A>

The early return `return Ok(CpuBufferPoolChunk { buffer: current_buffer.clone(), index: 0, align_offset: 0, requested_len: 0, marker: PhantomData });` only changes field indentation (the `// TODO: remove .clone() once non-lexical borrows land` comment stays).

@@ -440,23 +440,30 @@ impl<T, A> CpuBufferPool<T, A>

`let idx = current_buffer.next_index.load(Ordering::SeqCst);` is joined back onto one line. The alignment computation is re-wrapped: the two `if self.usage.uniform_buffer { ... } else { 1 }` and `if self.usage.storage_buffer { ... } else { 1 }` arguments of `cmp::max` become full blocks, with the `self.device().physical_device().limits().min_uniform_buffer_offset_alignment() as usize` chain (and the storage-buffer equivalent) split one call per line, the `as usize` cast even ending up wrapped. The tentative offset is wrapped as:

    let tentative_align_offset =
        (align_bytes - ((idx * mem::size_of::<T>()) % align_bytes)) % align_bytes;
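The wrapped expression above is the usual round-up-to-alignment arithmetic. A minimal standalone sketch of it (an illustration, not code from the crate): given a byte position and an alignment, it yields the padding needed to reach the next multiple of the alignment.

    // Padding that brings `pos` up to the next multiple of `align`;
    // zero when `pos` is already aligned.
    fn align_padding(pos: usize, align: usize) -> usize {
        (align - (pos % align)) % align
    }

    fn main() {
        assert_eq!(align_padding(13, 8), 3); // 13 + 3 = 16, a multiple of 8
        assert_eq!(align_padding(16, 8), 0); // already aligned, no padding
    }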
@@ -468,8 +475,12 @@ impl<T, A> CpuBufferPool<T, A>

The chunk-overlap test that guards `(tentative_index, tentative_len, tentative_align_offset)` keeps the same condition, but the `chunks_in_use.iter().any(|c| ...)` closure body is expanded over several lines:

    if tentative_index + tentative_len <= current_buffer.capacity &&
        !chunks_in_use.iter().any(|c| {
            (c.index >= tentative_index &&
                 c.index < tentative_index + tentative_len) ||
                (c.index <= tentative_index &&
                     c.index + c.len > tentative_index)
        })
    {
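That condition is a plain interval-overlap predicate: the candidate range conflicts with an existing chunk if either range starts inside the other. A small sketch of the same test in isolation (illustration only, not crate code):

    // `a` is the tentative range, `b` an existing chunk, both half-open.
    fn overlaps(a_start: usize, a_len: usize, b_start: usize, b_len: usize) -> bool {
        (b_start >= a_start && b_start < a_start + a_len)
            || (b_start <= a_start && b_start + b_len > a_start)
    }

    fn main() {
        assert!(overlaps(4, 4, 6, 4));  // [4,8) and [6,10) overlap
        assert!(!overlaps(4, 4, 8, 2)); // [4,8) and [8,10) touch but do not overlap
    }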
@@ -501,19 +512,22 @@ impl<T, A> CpuBufferPool<T, A>

After the `ptr::write` loop, the length check is reformatted with one argument per line: `assert_eq!(written, requested_len, "Iterator passed to CpuBufferPool::chunk has a mismatch between reported length and actual number of elements");`. The bookkeeping call `current_buffer.next_index.store(index + occupied_len, Ordering::SeqCst);` is split one call per line, and the `chunks_in_use.push(ActualBufferChunk { index, len: occupied_len, num_cpu_accesses: 1, num_gpu_accesses: 0 });` literal only changes indentation before the final `Ok(CpuBufferPoolChunk { ... })`.

@@ -557,10 +571,15 @@ impl<T, A> Clone for CpuBufferPoolChunk<T, A>

In `clone`, both lookups are split one call per line:

    let chunk = chunks_in_use_lock
        .iter_mut()
        .find(|c| c.index == self.index)
        .unwrap();

    chunk.num_cpu_accesses = chunk
        .num_cpu_accesses
        .checked_add(1)
        .expect("Overflow in CPU accesses");

@@ -601,7 +620,10 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
@@ -618,10 +640,15 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
@@ -632,7 +659,10 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
@@ -649,7 +679,10 @@ impl<T, A> Drop for CpuBufferPoolChunk<T, A>

The same treatment is applied to the three identical `chunks_in_use_lock.iter_mut().find(|c| c.index == self.index).unwrap()` lookups in the `BufferAccess` impl for `CpuBufferPoolChunk`, to `chunk.num_gpu_accesses.checked_add(1).expect("Overflow in GPU usages")`, and to the `position(|c| c.index == self.index)` lookup in its `Drop` impl.

@@ -679,9 +712,7 @@ impl<T, A> Clone for CpuBufferPoolSubbuffer<T, A>

`clone` collapses to a one-line struct literal: `CpuBufferPoolSubbuffer { chunk: self.chunk.clone() }`.
@@ -31,14 +31,14 @@ use device::DeviceOwned;

Same import re-sort as in the other buffer modules: `use memory::DeviceMemoryAllocError;` moves up before the `memory::pool::*` block and `use memory::pool::AllocLayout;` moves after `AllocFromRequirementsFilter`.

@@ -222,7 +222,7 @@ unsafe impl<T: ?Sized, A> BufferAccess for DeviceLocalBuffer<T, A>
@@ -238,7 +238,7 @@ unsafe impl<T: ?Sized, A> BufferAccess for DeviceLocalBuffer<T, A>

Two match arms that end in a block (`&mut GpuAccess::Exclusive { .. } => { Err(AccessError::AlreadyInUse) }` and the `GpuAccess::Exclusive { ref mut num } => { debug_assert!(*num >= 1); *num += 1; }` arm) gain a trailing comma after the closing brace.
@@ -41,6 +41,8 @@ use device::Device;
@@ -48,8 +50,6 @@ use memory::pool::MemoryPool;

`use memory::DedicatedAlloc;` and `use memory::DeviceMemoryAllocError;` move from after the `memory::pool::*` imports to before them.

@@ -90,7 +90,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
@@ -107,7 +108,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
@@ -168,7 +172,8 @@ impl<T> ImmutableBuffer<[T]> {
@@ -223,7 +230,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
@@ -234,7 +242,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {

The long return types of `from_data`, `from_buffer`, `from_iter`, `raw` and `raw_impl` are wrapped: `-> Result<(Arc<ImmutableBuffer<...>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>` now breaks before `DeviceMemoryAllocError>`, which lands on its own continuation line.

@@ -126,7 +128,7 @@ impl<T: ?Sized> ImmutableBuffer<T> {

Only the alignment of the `// TODO: return OomError` comment after `.build().unwrap()` changes.

@@ -158,9 +160,11 @@ impl<T> ImmutableBuffer<T> {
@@ -195,12 +200,14 @@ impl<T> ImmutableBuffer<[T]> {

`uninitialized` and `uninitialized_array` get the same return-type wrapping, their `ImmutableBuffer::raw(...)` calls (with `mem::size_of::<T>()` or `len * mem::size_of::<T>()` as the size) are split one argument per line, and `uninitialized_array`'s parameters move back onto the `pub unsafe fn` line.

@@ -436,14 +445,10 @@ mod tests {
@@ -471,8 +476,8 @@ mod tests {
@@ -496,23 +501,23 @@ mod tests {
@@ -520,28 +525,25 @@ mod tests {
@@ -549,15 +551,10 @@ mod tests {
@@ -579,15 +576,10 @@ mod tests {
@@ -615,8 +607,7 @@ mod tests {

In the tests, calls that previously spread one argument per line are collapsed onto single lines where they fit:

    let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone())
        .unwrap();
    let destination = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0)
        .unwrap();
    ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
    let source = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0).unwrap();
    let _ = ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone());

Meanwhile the `assert_should_panic!({ ... })` blocks in `writing_forbidden` and two later tests are re-indented: the `// TODO: check Result error instead of panicking` comment moves onto its own line inside the block and the `AutoCommandBufferBuilder::new(device.clone(), queue.family())` chain is re-wrapped. The `from_iter` destination in `@@ -471` only changes indentation of its `BufferUsage::all()` and `(0 .. 512).map(|_| 0u32)` arguments.
|
@ -135,18 +135,20 @@ impl UnsafeBuffer {
|
||||
|
||||
let mut output2 = if device.loaded_extensions().khr_dedicated_allocation {
|
||||
Some(vk::MemoryDedicatedRequirementsKHR {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
|
||||
pNext: ptr::null(),
|
||||
prefersDedicatedAllocation: mem::uninitialized(),
|
||||
requiresDedicatedAllocation: mem::uninitialized(),
|
||||
})
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
|
||||
pNext: ptr::null(),
|
||||
prefersDedicatedAllocation: mem::uninitialized(),
|
||||
requiresDedicatedAllocation: mem::uninitialized(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut output = vk::MemoryRequirements2KHR {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
|
||||
pNext: output2.as_mut().map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
|
||||
pNext: output2
|
||||
.as_mut()
|
||||
.map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
|
||||
.unwrap_or(ptr::null_mut()) as *mut _,
|
||||
memoryRequirements: mem::uninitialized(),
|
||||
};
|
||||
@ -416,8 +418,10 @@ impl From<Error> for BufferCreationError {
|
||||
#[inline]
|
||||
fn from(err: Error) -> BufferCreationError {
|
||||
match err {
|
||||
err @ Error::OutOfHostMemory => BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
|
||||
err @ Error::OutOfHostMemory =>
|
||||
BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory =>
|
||||
BufferCreationError::AllocError(DeviceMemoryAllocError::from(err)),
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
@ -462,15 +466,16 @@ mod tests {
|
||||
};
|
||||
|
||||
assert_should_panic!("Can't enable sparse residency without enabling sparse \
|
||||
binding as well", {
|
||||
let _ = unsafe {
|
||||
UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
};
|
||||
});
|
||||
binding as well",
|
||||
{
|
||||
let _ = unsafe {
|
||||
UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -483,15 +488,16 @@ mod tests {
|
||||
};
|
||||
|
||||
assert_should_panic!("Can't enable sparse aliasing without enabling sparse \
|
||||
binding as well", {
|
||||
let _ = unsafe {
|
||||
UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
};
|
||||
});
|
||||
binding as well",
|
||||
{
|
||||
let _ = unsafe {
|
||||
UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
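The `err @` arms wrapped in the `@@ -416` hunk are a common error-conversion shape. A condensed, self-contained sketch of that shape follows; the enum and variant names here are stand-ins, not vulkano's actual `BufferCreationError` types, only the match-arm layout mirrors the hunk:

    #[allow(dead_code)]
    #[derive(Debug)]
    enum Error {
        OutOfHostMemory,
        OutOfDeviceMemory,
        Other,
    }

    #[allow(dead_code)]
    #[derive(Debug)]
    enum CreationError {
        AllocError(Error),
    }

    impl From<Error> for CreationError {
        fn from(err: Error) -> CreationError {
            match err {
                // `err @ Pattern` re-binds the matched value so it can be
                // forwarded into the wrapping variant.
                err @ Error::OutOfHostMemory =>
                    CreationError::AllocError(err),
                err @ Error::OutOfDeviceMemory =>
                    CreationError::AllocError(err),
                _ => panic!("unexpected error: {:?}", err),
            }
        }
    }

    fn main() {
        println!("{:?}", CreationError::from(Error::OutOfHostMemory));
    }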
|
@ -384,10 +384,9 @@ mod tests {
|
||||
..BufferUsage::none()
|
||||
};
|
||||
|
||||
let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0 .. 128).map(|_| 0),
|
||||
usage,
|
||||
queue.clone())
|
||||
.unwrap();
|
||||
let (buffer, _) =
|
||||
ImmutableBuffer::<[u32]>::from_iter((0 .. 128).map(|_| 0), usage, queue.clone())
|
||||
.unwrap();
|
||||
let view = BufferView::new(buffer, format::R32Uint).unwrap();
|
||||
|
||||
assert!(view.storage_texel_buffer());
|
||||
|
@ -12,9 +12,9 @@ use std::fmt;
|
||||
use std::iter;
|
||||
use std::mem;
|
||||
use std::slice;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
use OomError;
|
||||
use buffer::BufferAccess;
|
||||
@ -116,7 +116,7 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// executing the command buffer modifies it.
|
||||
#[inline]
|
||||
pub fn primary(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, Kind::primary(), Flags::None)
|
||||
}
|
||||
|
||||
@ -126,10 +126,12 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// destroyed. This makes it possible for the implementation to perform additional
|
||||
/// optimizations.
|
||||
#[inline]
|
||||
pub fn primary_one_time_submit(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, Kind::primary(),
|
||||
pub fn primary_one_time_submit(
|
||||
device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
AutoCommandBufferBuilder::with_flags(device,
|
||||
queue_family,
|
||||
Kind::primary(),
|
||||
Flags::OneTimeSubmit)
|
||||
}
|
||||
|
||||
@ -138,10 +140,12 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// Contrary to `primary`, the final command buffer can be executed multiple times in parallel
|
||||
/// in multiple different queues.
|
||||
#[inline]
|
||||
pub fn primary_simultaneous_use(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, Kind::primary(),
|
||||
pub fn primary_simultaneous_use(
|
||||
device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
AutoCommandBufferBuilder::with_flags(device,
|
||||
queue_family,
|
||||
Kind::primary(),
|
||||
Flags::SimultaneousUse)
|
||||
}
|
||||
|
||||
@ -150,8 +154,9 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// The final command buffer can only be executed once at a time. In other words, it is as if
|
||||
/// executing the command buffer modifies it.
|
||||
#[inline]
|
||||
pub fn secondary_compute(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
pub fn secondary_compute(
|
||||
device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
let kind = Kind::secondary(KindOcclusionQuery::Forbidden,
|
||||
QueryPipelineStatisticFlags::none());
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::None)
|
||||
@ -163,9 +168,9 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// being destroyed. This makes it possible for the implementation to perform additional
|
||||
/// optimizations.
|
||||
#[inline]
|
||||
pub fn secondary_compute_one_time_submit(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
pub fn secondary_compute_one_time_submit(
|
||||
device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
let kind = Kind::secondary(KindOcclusionQuery::Forbidden,
|
||||
QueryPipelineStatisticFlags::none());
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::OneTimeSubmit)
|
||||
@ -176,9 +181,9 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// Contrary to `secondary_compute`, the final command buffer can be executed multiple times in
|
||||
/// parallel in multiple different queues.
|
||||
#[inline]
|
||||
pub fn secondary_compute_simultaneous_use(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
pub fn secondary_compute_simultaneous_use(
|
||||
device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
let kind = Kind::secondary(KindOcclusionQuery::Forbidden,
|
||||
QueryPipelineStatisticFlags::none());
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::SimultaneousUse)
|
||||
@ -186,34 +191,30 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
|
||||
/// Same as `secondary_compute`, but allows specifying how queries are being inherited.
|
||||
#[inline]
|
||||
pub fn secondary_compute_inherit_queries(device: Arc<Device>, queue_family: QueueFamily,
|
||||
occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
pub fn secondary_compute_inherit_queries(
|
||||
device: Arc<Device>, queue_family: QueueFamily, occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
let kind = Kind::secondary(occlusion_query, query_statistics_flags);
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::None)
|
||||
}
|
||||
|
||||
/// Same as `secondary_compute_one_time_submit`, but allows specifying how queries are being inherited.
|
||||
#[inline]
|
||||
pub fn secondary_compute_one_time_submit_inherit_queries(device: Arc<Device>,
|
||||
queue_family: QueueFamily,
|
||||
occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
pub fn secondary_compute_one_time_submit_inherit_queries(
|
||||
device: Arc<Device>, queue_family: QueueFamily, occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
let kind = Kind::secondary(occlusion_query, query_statistics_flags);
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::OneTimeSubmit)
|
||||
}
|
||||
|
||||
/// Same as `secondary_compute_simultaneous_use`, but allows specifying how queries are being inherited.
|
||||
#[inline]
|
||||
pub fn secondary_compute_simultaneous_use_inherit_queries(device: Arc<Device>,
|
||||
queue_family: QueueFamily,
|
||||
occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
pub fn secondary_compute_simultaneous_use_inherit_queries(
|
||||
device: Arc<Device>, queue_family: QueueFamily, occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
let kind = Kind::secondary(occlusion_query, query_statistics_flags);
|
||||
AutoCommandBufferBuilder::with_flags(device, queue_family, kind, Flags::SimultaneousUse)
|
||||
}
|
||||
@ -223,15 +224,18 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// The final command buffer can only be executed once at a time. In other words, it is as if
|
||||
/// executing the command buffer modifies it.
|
||||
#[inline]
|
||||
pub fn secondary_graphics<R>(device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
pub fn secondary_graphics<R>(
|
||||
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
where R: RenderPassAbstract + Clone + Send + Sync + 'static
|
||||
{
|
||||
let kind = Kind::Secondary {
|
||||
render_pass: Some(KindSecondaryRenderPass {
|
||||
subpass,
|
||||
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
}),
|
||||
subpass,
|
||||
framebuffer:
|
||||
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
()>>,
|
||||
}),
|
||||
occlusion_query: KindOcclusionQuery::Forbidden,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags::none(),
|
||||
};
|
||||
@ -245,16 +249,18 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// before being destroyed. This makes it possible for the implementation to perform additional
|
||||
/// optimizations.
|
||||
#[inline]
|
||||
pub fn secondary_graphics_one_time_submit<R>(device: Arc<Device>, queue_family: QueueFamily,
|
||||
subpass: Subpass<R>)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
pub fn secondary_graphics_one_time_submit<R>(
|
||||
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
where R: RenderPassAbstract + Clone + Send + Sync + 'static
|
||||
{
|
||||
let kind = Kind::Secondary {
|
||||
render_pass: Some(KindSecondaryRenderPass {
|
||||
subpass,
|
||||
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
}),
|
||||
subpass,
|
||||
framebuffer:
|
||||
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
()>>,
|
||||
}),
|
||||
occlusion_query: KindOcclusionQuery::Forbidden,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags::none(),
|
||||
};
|
||||
@ -267,16 +273,18 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
/// Contrary to `secondary_graphics`, the final command buffer can be executed multiple times
|
||||
/// in parallel in multiple different queues.
|
||||
#[inline]
|
||||
pub fn secondary_graphics_simultaneous_use<R>(device: Arc<Device>, queue_family: QueueFamily,
|
||||
subpass: Subpass<R>)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
pub fn secondary_graphics_simultaneous_use<R>(
|
||||
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
where R: RenderPassAbstract + Clone + Send + Sync + 'static
|
||||
{
|
||||
let kind = Kind::Secondary {
|
||||
render_pass: Some(KindSecondaryRenderPass {
|
||||
subpass,
|
||||
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
}),
|
||||
subpass,
|
||||
framebuffer:
|
||||
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
()>>,
|
||||
}),
|
||||
occlusion_query: KindOcclusionQuery::Forbidden,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags::none(),
|
||||
};
|
||||
@ -286,18 +294,19 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
|
||||
/// Same as `secondary_graphics`, but allows specifying how queries are being inherited.
|
||||
#[inline]
|
||||
pub fn secondary_graphics_inherit_queries<R>(device: Arc<Device>, queue_family: QueueFamily,
|
||||
subpass: Subpass<R>,
|
||||
occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
pub fn secondary_graphics_inherit_queries<R>(
|
||||
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>,
|
||||
occlusion_query: KindOcclusionQuery, query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
where R: RenderPassAbstract + Clone + Send + Sync + 'static
|
||||
{
|
||||
let kind = Kind::Secondary {
|
||||
render_pass: Some(KindSecondaryRenderPass {
|
||||
subpass,
|
||||
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
}),
|
||||
subpass,
|
||||
framebuffer:
|
||||
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
()>>,
|
||||
}),
|
||||
occlusion_query,
|
||||
query_statistics_flags,
|
||||
};
|
||||
@ -307,18 +316,19 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
|
||||
/// Same as `secondary_graphics_one_time_submit`, but allows specifying how queries are being inherited.
|
||||
#[inline]
|
||||
pub fn secondary_graphics_one_time_submit_inherit_queries<R>(device: Arc<Device>,
|
||||
queue_family: QueueFamily, subpass: Subpass<R>,
|
||||
occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
pub fn secondary_graphics_one_time_submit_inherit_queries<R>(
|
||||
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>,
|
||||
occlusion_query: KindOcclusionQuery, query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
where R: RenderPassAbstract + Clone + Send + Sync + 'static
|
||||
{
|
||||
let kind = Kind::Secondary {
|
||||
render_pass: Some(KindSecondaryRenderPass {
|
||||
subpass,
|
||||
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
}),
|
||||
subpass,
|
||||
framebuffer:
|
||||
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
()>>,
|
||||
}),
|
||||
occlusion_query,
|
||||
query_statistics_flags,
|
||||
};
|
||||
@ -328,18 +338,19 @@ impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
|
||||
/// Same as `secondary_graphics_simultaneous_use`, but allows specifying how queries are being inherited.
|
||||
#[inline]
|
||||
pub fn secondary_graphics_simultaneous_use_inherit_queries<R>(device: Arc<Device>,
|
||||
queue_family: QueueFamily, subpass: Subpass<R>,
|
||||
occlusion_query: KindOcclusionQuery,
|
||||
query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
pub fn secondary_graphics_simultaneous_use_inherit_queries<R>(
|
||||
device: Arc<Device>, queue_family: QueueFamily, subpass: Subpass<R>,
|
||||
occlusion_query: KindOcclusionQuery, query_statistics_flags: QueryPipelineStatisticFlags)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
where R: RenderPassAbstract + Clone + Send + Sync + 'static
|
||||
{
|
||||
let kind = Kind::Secondary {
|
||||
render_pass: Some(KindSecondaryRenderPass {
|
||||
subpass,
|
||||
framebuffer: None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
}),
|
||||
subpass,
|
||||
framebuffer:
|
||||
None::<Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
()>>,
|
||||
}),
|
||||
occlusion_query,
|
||||
query_statistics_flags,
|
||||
};
|
||||
@ -398,8 +409,7 @@ impl<P> AutoCommandBufferBuilder<P> {

#[inline]
fn ensure_inside_render_pass_secondary(&self)
-> Result<(), AutoCommandBufferBuilderContextError>
{
-> Result<(), AutoCommandBufferBuilderContextError> {
if self.render_pass.is_some() {
if self.subpass_secondary {
Ok(())
@ -413,7 +423,7 @@ impl<P> AutoCommandBufferBuilder<P> {

#[inline]
fn ensure_inside_render_pass_inline<Gp>(&self, pipeline: &Gp)
-> Result<(), AutoCommandBufferBuilderContextError>
-> Result<(), AutoCommandBufferBuilderContextError>
where Gp: ?Sized + GraphicsPipelineAbstract
{
if self.render_pass.is_none() {
@ -459,9 +469,9 @@ impl<P> AutoCommandBufferBuilder<P> {
};

Ok(AutoCommandBuffer {
inner: self.inner.build()?,
submit_state,
})
inner: self.inner.build()?,
submit_state,
})
}

/// Adds a command that enters a render pass.
@ -489,8 +499,11 @@ impl<P> AutoCommandBufferBuilder<P> {

let clear_values = framebuffer.convert_clear_values(clear_values);
let clear_values = clear_values.collect::<Vec<_>>().into_iter(); // TODO: necessary for Send + Sync ; needs an API rework of convert_clear_values
let contents = if secondary { SubpassContents::SecondaryCommandBuffers }
else { SubpassContents::Inline };
let contents = if secondary {
SubpassContents::SecondaryCommandBuffers
} else {
SubpassContents::Inline
};
self.inner
.begin_render_pass(framebuffer.clone(), contents, clear_values)?;
self.render_pass = Some((Box::new(framebuffer) as Box<_>, 0));
@ -530,12 +543,12 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
///
|
||||
/// - Panics if the source or the destination was not created with `device`.
|
||||
///
|
||||
pub fn blit_image<S, D>(
|
||||
mut self, source: S, source_top_left: [i32; 3], source_bottom_right: [i32; 3],
|
||||
source_base_array_layer: u32, source_mip_level: u32, destination: D,
|
||||
destination_top_left: [i32; 3], destination_bottom_right: [i32; 3],
|
||||
destination_base_array_layer: u32, destination_mip_level: u32, layer_count: u32,
|
||||
filter: Filter) -> Result<Self, BlitImageError>
|
||||
pub fn blit_image<S, D>(mut self, source: S, source_top_left: [i32; 3],
|
||||
source_bottom_right: [i32; 3], source_base_array_layer: u32,
|
||||
source_mip_level: u32, destination: D, destination_top_left: [i32; 3],
|
||||
destination_bottom_right: [i32; 3], destination_base_array_layer: u32,
|
||||
destination_mip_level: u32, layer_count: u32, filter: Filter)
|
||||
-> Result<Self, BlitImageError>
|
||||
where S: ImageAccess + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static
|
||||
{
|
||||
@ -546,10 +559,18 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
self.ensure_outside_render_pass()?;
|
||||
|
||||
check_blit_image(self.device(), &source, source_top_left, source_bottom_right,
|
||||
source_base_array_layer, source_mip_level, &destination,
|
||||
destination_top_left, destination_bottom_right,
|
||||
destination_base_array_layer, destination_mip_level, layer_count,
|
||||
check_blit_image(self.device(),
|
||||
&source,
|
||||
source_top_left,
|
||||
source_bottom_right,
|
||||
source_base_array_layer,
|
||||
source_mip_level,
|
||||
&destination,
|
||||
destination_top_left,
|
||||
destination_bottom_right,
|
||||
destination_base_array_layer,
|
||||
destination_mip_level,
|
||||
layer_count,
|
||||
filter)?;
|
||||
|
||||
let blit = UnsafeCommandBufferBuilderImageBlit {
|
||||
@ -574,8 +595,13 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
destination_bottom_right,
|
||||
};
|
||||
|
||||
self.inner.blit_image(source, ImageLayout::TransferSrcOptimal, destination, // TODO: let choose layout
|
||||
ImageLayout::TransferDstOptimal, iter::once(blit), filter)?;
|
||||
self.inner
|
||||
.blit_image(source,
|
||||
ImageLayout::TransferSrcOptimal,
|
||||
destination, // TODO: let choose layout
|
||||
ImageLayout::TransferDstOptimal,
|
||||
iter::once(blit),
|
||||
filter)?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@ -589,7 +615,7 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
///
|
||||
pub fn clear_color_image<I>(self, image: I, color: ClearValue)
|
||||
-> Result<Self, ClearColorImageError>
|
||||
where I: ImageAccess + Send + Sync + 'static,
|
||||
where I: ImageAccess + Send + Sync + 'static
|
||||
{
|
||||
let layers = image.dimensions().array_layers();
|
||||
let levels = image.mipmap_levels();
|
||||
@ -606,7 +632,7 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
pub fn clear_color_image_dimensions<I>(mut self, image: I, first_layer: u32, num_layers: u32,
|
||||
first_mipmap: u32, num_mipmaps: u32, color: ClearValue)
|
||||
-> Result<Self, ClearColorImageError>
|
||||
where I: ImageAccess + Send + Sync + 'static,
|
||||
where I: ImageAccess + Send + Sync + 'static
|
||||
{
|
||||
unsafe {
|
||||
if !self.graphics_allowed && !self.compute_allowed {
|
||||
@ -614,11 +640,17 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
self.ensure_outside_render_pass()?;
|
||||
check_clear_color_image(self.device(), &image, first_layer, num_layers,
|
||||
first_mipmap, num_mipmaps)?;
|
||||
check_clear_color_image(self.device(),
|
||||
&image,
|
||||
first_layer,
|
||||
num_layers,
|
||||
first_mipmap,
|
||||
num_mipmaps)?;
|
||||
|
||||
match color {
|
||||
ClearValue::Float(_) | ClearValue::Int(_) | ClearValue::Uint(_) => {},
|
||||
ClearValue::Float(_) |
|
||||
ClearValue::Int(_) |
|
||||
ClearValue::Uint(_) => {},
|
||||
_ => panic!("The clear color is not a color value"),
|
||||
};
|
||||
|
||||
@ -630,8 +662,11 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
};
|
||||
|
||||
// TODO: let choose layout
|
||||
self.inner.clear_color_image(image, ImageLayout::TransferDstOptimal, color,
|
||||
iter::once(region))?;
|
||||
self.inner
|
||||
.clear_color_image(image,
|
||||
ImageLayout::TransferDstOptimal,
|
||||
color,
|
||||
iter::once(region))?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@ -641,25 +676,27 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
/// This command will copy from the source to the destination. If their size is not equal, then
|
||||
/// the amount of data copied is equal to the smallest of the two.
|
||||
#[inline]
|
||||
pub fn copy_buffer<S, D, T>(mut self, source: S, destination: D) -> Result<Self, CopyBufferError>
|
||||
pub fn copy_buffer<S, D, T>(mut self, source: S, destination: D)
|
||||
-> Result<Self, CopyBufferError>
|
||||
where S: TypedBufferAccess<Content = T> + Send + Sync + 'static,
|
||||
D: TypedBufferAccess<Content = T> + Send + Sync + 'static,
|
||||
T: ?Sized,
|
||||
T: ?Sized
|
||||
{
|
||||
unsafe {
|
||||
self.ensure_outside_render_pass()?;
|
||||
let infos = check_copy_buffer(self.device(), &source, &destination)?;
|
||||
self.inner.copy_buffer(source, destination, iter::once((0, 0, infos.copy_size)))?;
|
||||
self.inner
|
||||
.copy_buffer(source, destination, iter::once((0, 0, infos.copy_size)))?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a command that copies from a buffer to an image.
|
||||
pub fn copy_buffer_to_image<S, D, Px>(self, source: S, destination: D)
|
||||
-> Result<Self, CopyBufferImageError>
|
||||
-> Result<Self, CopyBufferImageError>
|
||||
where S: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static,
|
||||
Format: AcceptsPixels<Px>,
|
||||
Format: AcceptsPixels<Px>
|
||||
{
|
||||
self.ensure_outside_render_pass()?;
|
||||
|
||||
@ -668,19 +705,26 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
/// Adds a command that copies from a buffer to an image.
|
||||
pub fn copy_buffer_to_image_dimensions<S, D, Px>(
|
||||
mut self, source: S, destination: D, offset: [u32; 3], size: [u32; 3], first_layer: u32,
|
||||
num_layers: u32, mipmap: u32) -> Result<Self, CopyBufferImageError>
|
||||
pub fn copy_buffer_to_image_dimensions<S, D, Px>(mut self, source: S, destination: D,
|
||||
offset: [u32; 3], size: [u32; 3],
|
||||
first_layer: u32, num_layers: u32, mipmap: u32)
|
||||
-> Result<Self, CopyBufferImageError>
|
||||
where S: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static,
|
||||
Format: AcceptsPixels<Px>,
|
||||
Format: AcceptsPixels<Px>
|
||||
{
|
||||
unsafe {
|
||||
self.ensure_outside_render_pass()?;
|
||||
|
||||
check_copy_buffer_image(self.device(), &source, &destination,
|
||||
CheckCopyBufferImageTy::BufferToImage, offset, size,
|
||||
first_layer, num_layers, mipmap)?;
|
||||
check_copy_buffer_image(self.device(),
|
||||
&source,
|
||||
&destination,
|
||||
CheckCopyBufferImageTy::BufferToImage,
|
||||
offset,
|
||||
size,
|
||||
first_layer,
|
||||
num_layers,
|
||||
mipmap)?;
|
||||
|
||||
let copy = UnsafeCommandBufferBuilderBufferImageCopy {
|
||||
buffer_offset: 0,
|
||||
@ -702,18 +746,21 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
image_extent: size,
|
||||
};
|
||||
|
||||
self.inner.copy_buffer_to_image(source, destination, ImageLayout::TransferDstOptimal, // TODO: let choose layout
|
||||
iter::once(copy))?;
|
||||
self.inner
|
||||
.copy_buffer_to_image(source,
|
||||
destination,
|
||||
ImageLayout::TransferDstOptimal, // TODO: let choose layout
|
||||
iter::once(copy))?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a command that copies from an image to a buffer.
|
||||
pub fn copy_image_to_buffer<S, D, Px>(self, source: S, destination: D)
|
||||
-> Result<Self, CopyBufferImageError>
|
||||
-> Result<Self, CopyBufferImageError>
|
||||
where S: ImageAccess + Send + Sync + 'static,
|
||||
D: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
|
||||
Format: AcceptsPixels<Px>,
|
||||
Format: AcceptsPixels<Px>
|
||||
{
|
||||
self.ensure_outside_render_pass()?;
|
||||
|
||||
@ -722,19 +769,26 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
/// Adds a command that copies from an image to a buffer.
|
||||
pub fn copy_image_to_buffer_dimensions<S, D, Px>(
|
||||
mut self, source: S, destination: D, offset: [u32; 3], size: [u32; 3], first_layer: u32,
|
||||
num_layers: u32, mipmap: u32) -> Result<Self, CopyBufferImageError>
|
||||
pub fn copy_image_to_buffer_dimensions<S, D, Px>(mut self, source: S, destination: D,
|
||||
offset: [u32; 3], size: [u32; 3],
|
||||
first_layer: u32, num_layers: u32, mipmap: u32)
|
||||
-> Result<Self, CopyBufferImageError>
|
||||
where S: ImageAccess + Send + Sync + 'static,
|
||||
D: TypedBufferAccess<Content = [Px]> + Send + Sync + 'static,
|
||||
Format: AcceptsPixels<Px>,
|
||||
Format: AcceptsPixels<Px>
|
||||
{
|
||||
unsafe {
|
||||
self.ensure_outside_render_pass()?;
|
||||
|
||||
check_copy_buffer_image(self.device(), &destination, &source,
|
||||
CheckCopyBufferImageTy::ImageToBuffer, offset, size,
|
||||
first_layer, num_layers, mipmap)?;
|
||||
check_copy_buffer_image(self.device(),
|
||||
&destination,
|
||||
&source,
|
||||
CheckCopyBufferImageTy::ImageToBuffer,
|
||||
offset,
|
||||
size,
|
||||
first_layer,
|
||||
num_layers,
|
||||
mipmap)?;
|
||||
|
||||
let copy = UnsafeCommandBufferBuilderBufferImageCopy {
|
||||
buffer_offset: 0,
|
||||
@ -756,8 +810,11 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
image_extent: size,
|
||||
};
|
||||
|
||||
self.inner.copy_image_to_buffer(source, ImageLayout::TransferSrcOptimal, destination, // TODO: let choose layout
|
||||
iter::once(copy))?;
|
||||
self.inner
|
||||
.copy_image_to_buffer(source,
|
||||
ImageLayout::TransferSrcOptimal,
|
||||
destination, // TODO: let choose layout
|
||||
iter::once(copy))?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@ -785,7 +842,11 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
push_constants(&mut self.inner, pipeline.clone(), constants);
|
||||
descriptor_sets(&mut self.inner, &mut self.state_cacher, false, pipeline.clone(), sets)?;
|
||||
descriptor_sets(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
false,
|
||||
pipeline.clone(),
|
||||
sets)?;
|
||||
|
||||
self.inner.dispatch(dimensions);
|
||||
Ok(self)
|
||||
@ -794,7 +855,8 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
#[inline]
|
||||
pub fn draw<V, Gp, S, Pc>(mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, sets: S,
|
||||
constants: Pc) -> Result<Self, DrawError>
|
||||
constants: Pc)
|
||||
-> Result<Self, DrawError>
|
||||
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
|
||||
S: DescriptorSetsCollection
|
||||
{
|
||||
@ -817,22 +879,29 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
push_constants(&mut self.inner, pipeline.clone(), constants);
|
||||
set_state(&mut self.inner, dynamic);
|
||||
descriptor_sets(&mut self.inner, &mut self.state_cacher, true, pipeline.clone(), sets)?;
|
||||
vertex_buffers(&mut self.inner, &mut self.state_cacher, vb_infos.vertex_buffers)?;
|
||||
descriptor_sets(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
true,
|
||||
pipeline.clone(),
|
||||
sets)?;
|
||||
vertex_buffers(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
vb_infos.vertex_buffers)?;
|
||||
|
||||
debug_assert!(self.graphics_allowed);
|
||||
|
||||
self.inner
|
||||
.draw(vb_infos.vertex_count as u32, vb_infos.instance_count as u32, 0, 0);
|
||||
self.inner.draw(vb_infos.vertex_count as u32,
|
||||
vb_infos.instance_count as u32,
|
||||
0,
|
||||
0);
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(
|
||||
mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, index_buffer: Ib, sets: S,
|
||||
constants: Pc)
|
||||
-> Result<Self, DrawIndexedError>
|
||||
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(mut self, pipeline: Gp, dynamic: DynamicState,
|
||||
vertices: V, index_buffer: Ib, sets: S, constants: Pc)
|
||||
-> Result<Self, DrawIndexedError>
|
||||
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
|
||||
S: DescriptorSetsCollection,
|
||||
Ib: BufferAccess + TypedBufferAccess<Content = [I]> + Send + Sync + 'static,
|
||||
@ -864,13 +933,20 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
push_constants(&mut self.inner, pipeline.clone(), constants);
|
||||
set_state(&mut self.inner, dynamic);
|
||||
descriptor_sets(&mut self.inner, &mut self.state_cacher, true, pipeline.clone(), sets)?;
|
||||
vertex_buffers(&mut self.inner, &mut self.state_cacher, vb_infos.vertex_buffers)?;
|
||||
descriptor_sets(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
true,
|
||||
pipeline.clone(),
|
||||
sets)?;
|
||||
vertex_buffers(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
vb_infos.vertex_buffers)?;
|
||||
// TODO: how to handle an index out of range of the vertex buffers?
|
||||
|
||||
debug_assert!(self.graphics_allowed);
|
||||
|
||||
self.inner.draw_indexed(ib_infos.num_indices as u32, 1, 0, 0, 0);
|
||||
self.inner
|
||||
.draw_indexed(ib_infos.num_indices as u32, 1, 0, 0, 0);
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@ -908,14 +984,21 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
push_constants(&mut self.inner, pipeline.clone(), constants);
|
||||
set_state(&mut self.inner, dynamic);
|
||||
descriptor_sets(&mut self.inner, &mut self.state_cacher, true, pipeline.clone(), sets)?;
|
||||
vertex_buffers(&mut self.inner, &mut self.state_cacher, vb_infos.vertex_buffers)?;
|
||||
descriptor_sets(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
true,
|
||||
pipeline.clone(),
|
||||
sets)?;
|
||||
vertex_buffers(&mut self.inner,
|
||||
&mut self.state_cacher,
|
||||
vb_infos.vertex_buffers)?;
|
||||
|
||||
debug_assert!(self.graphics_allowed);
|
||||
|
||||
self.inner.draw_indirect(indirect_buffer,
|
||||
draw_count,
|
||||
mem::size_of::<DrawIndirectCommand>() as u32)?;
|
||||
self.inner
|
||||
.draw_indirect(indirect_buffer,
|
||||
draw_count,
|
||||
mem::size_of::<DrawIndirectCommand>() as u32)?;
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@ -932,16 +1015,15 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
match self.render_pass {
|
||||
Some((ref rp, index))
|
||||
if rp.num_subpasses() as u32 == index + 1 => (),
|
||||
Some((ref rp, index)) if rp.num_subpasses() as u32 == index + 1 => (),
|
||||
None => {
|
||||
return Err(AutoCommandBufferBuilderContextError::ForbiddenOutsideRenderPass);
|
||||
},
|
||||
Some((ref rp, index)) => {
|
||||
return Err(AutoCommandBufferBuilderContextError::NumSubpassesMismatch {
|
||||
actual: rp.num_subpasses() as u32,
|
||||
current: index,
|
||||
});
|
||||
actual: rp.num_subpasses() as u32,
|
||||
current: index,
|
||||
});
|
||||
},
|
||||
}
|
||||
|
||||
@ -1006,14 +1088,14 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
match self.render_pass {
|
||||
None => {
|
||||
return Err(AutoCommandBufferBuilderContextError::ForbiddenOutsideRenderPass)
|
||||
return Err(AutoCommandBufferBuilderContextError::ForbiddenOutsideRenderPass);
|
||||
},
|
||||
Some((ref rp, ref mut index)) => {
|
||||
if *index + 1 >= rp.num_subpasses() as u32 {
|
||||
return Err(AutoCommandBufferBuilderContextError::NumSubpassesMismatch {
|
||||
actual: rp.num_subpasses() as u32,
|
||||
current: *index,
|
||||
});
|
||||
actual: rp.num_subpasses() as u32,
|
||||
current: *index,
|
||||
});
|
||||
} else {
|
||||
*index += 1;
|
||||
}
|
||||
@ -1024,8 +1106,11 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
debug_assert!(self.graphics_allowed);
|
||||
|
||||
let contents = if secondary { SubpassContents::SecondaryCommandBuffers }
|
||||
else { SubpassContents::Inline };
|
||||
let contents = if secondary {
|
||||
SubpassContents::SecondaryCommandBuffers
|
||||
} else {
|
||||
SubpassContents::Inline
|
||||
};
|
||||
self.inner.next_subpass(contents);
|
||||
Ok(self)
|
||||
}
|
||||
@ -1084,10 +1169,10 @@ unsafe fn push_constants<P, Pl, Pc>(destination: &mut SyncCommandBufferBuilder<P
|
||||
range.size as usize);
|
||||
|
||||
destination.push_constants::<_, [u8]>(pipeline.clone(),
|
||||
range.stages,
|
||||
range.offset as u32,
|
||||
range.size as u32,
|
||||
data);
|
||||
range.stages,
|
||||
range.offset as u32,
|
||||
range.size as u32,
|
||||
data);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1110,8 +1195,7 @@ unsafe fn set_state<P>(destination: &mut SyncCommandBufferBuilder<P>, dynamic: D
|
||||
unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
|
||||
state_cacher: &mut StateCacher,
|
||||
vertex_buffers: Vec<Box<BufferAccess + Send + Sync>>)
|
||||
-> Result<(), SyncCommandBufferBuilderError>
|
||||
{
|
||||
-> Result<(), SyncCommandBufferBuilderError> {
|
||||
let binding_range = {
|
||||
let mut compare = state_cacher.bind_vertex_buffers();
|
||||
for vb in vertex_buffers.iter() {
|
||||
@ -1119,7 +1203,7 @@ unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
|
||||
}
|
||||
match compare.compare() {
|
||||
Some(r) => r,
|
||||
None => return Ok(())
|
||||
None => return Ok(()),
|
||||
}
|
||||
};
|
||||
|
||||
@ -1127,7 +1211,11 @@ unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
|
||||
let num_bindings = binding_range.end - binding_range.start;
|
||||
|
||||
let mut binder = destination.bind_vertex_buffers();
|
||||
for vb in vertex_buffers.into_iter().skip(first_binding as usize).take(num_bindings as usize) {
|
||||
for vb in vertex_buffers
|
||||
.into_iter()
|
||||
.skip(first_binding as usize)
|
||||
.take(num_bindings as usize)
|
||||
{
|
||||
binder.add(vb);
|
||||
}
|
||||
binder.submit(first_binding)?;
|
||||
@ -1135,8 +1223,8 @@ unsafe fn vertex_buffers<P>(destination: &mut SyncCommandBufferBuilder<P>,
|
||||
}
|
||||
|
||||
unsafe fn descriptor_sets<P, Pl, S>(destination: &mut SyncCommandBufferBuilder<P>,
|
||||
state_cacher: &mut StateCacher,
|
||||
gfx: bool, pipeline: Pl, sets: S)
|
||||
state_cacher: &mut StateCacher, gfx: bool, pipeline: Pl,
|
||||
sets: S)
|
||||
-> Result<(), SyncCommandBufferBuilderError>
|
||||
where Pl: PipelineLayoutAbstract + Send + Sync + Clone + 'static,
|
||||
S: DescriptorSetsCollection
|
||||
@ -1160,7 +1248,8 @@ unsafe fn descriptor_sets<P, Pl, S>(destination: &mut SyncCommandBufferBuilder<P
|
||||
for set in sets.into_iter().skip(first_binding as usize) {
|
||||
sets_binder.add(set);
|
||||
}
|
||||
sets_binder.submit(gfx, pipeline.clone(), first_binding, iter::empty())?;
|
||||
sets_binder
|
||||
.submit(gfx, pipeline.clone(), first_binding, iter::empty())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -1201,8 +1290,7 @@ unsafe impl<P> CommandBuffer for AutoCommandBuffer<P> {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn lock_submit(&self, future: &GpuFuture, queue: &Queue)
|
||||
-> Result<(), CommandBufferExecError> {
|
||||
fn lock_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
|
||||
match self.submit_state {
|
||||
SubmitState::OneTime { ref already_submitted } => {
|
||||
let was_already_submitted = already_submitted.swap(true, Ordering::SeqCst);
|
||||
@ -1279,7 +1367,7 @@ unsafe impl<P> DeviceOwned for AutoCommandBuffer<P> {
|
||||
}

macro_rules! err_gen {
($name:ident { $($err:ident),+ }) => (
($name:ident { $($err:ident,)+ }) => (
#[derive(Debug, Clone)]
pub enum $name {
$(
@ -1328,89 +1416,89 @@ macro_rules! err_gen {
}

err_gen!(BuildError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
OomError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
OomError,
|
||||
});
|
||||
|
||||
err_gen!(BeginRenderPassError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(BlitImageError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckBlitImageError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckBlitImageError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(ClearColorImageError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckClearColorImageError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckClearColorImageError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(CopyBufferError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckCopyBufferError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckCopyBufferError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(CopyBufferImageError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckCopyBufferImageError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckCopyBufferImageError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(FillBufferError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckFillBufferError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckFillBufferError,
|
||||
});
|
||||
|
||||
err_gen!(DispatchError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckDispatchError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckDispatchError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(DrawError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckDynamicStateValidityError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckVertexBufferError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckDynamicStateValidityError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckVertexBufferError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(DrawIndexedError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckDynamicStateValidityError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckVertexBufferError,
|
||||
CheckIndexBufferError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckDynamicStateValidityError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckVertexBufferError,
|
||||
CheckIndexBufferError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(DrawIndirectError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckDynamicStateValidityError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckVertexBufferError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckDynamicStateValidityError,
|
||||
CheckPushConstantsValidityError,
|
||||
CheckDescriptorSetsValidityError,
|
||||
CheckVertexBufferError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(ExecuteCommandsError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
SyncCommandBufferBuilderError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
SyncCommandBufferBuilderError,
|
||||
});
|
||||
|
||||
err_gen!(UpdateBufferError {
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckUpdateBufferError
|
||||
});
|
||||
AutoCommandBufferBuilderContextError,
|
||||
CheckUpdateBufferError,
|
||||
});
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum AutoCommandBufferBuilderContextError {
|
||||
|
@ -126,15 +126,15 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
|
||||
for _ in 0 .. count as usize {
|
||||
if let Some(cmd) = existing.try_pop() {
|
||||
output.push(StandardCommandPoolBuilder {
|
||||
inner: StandardCommandPoolAlloc {
|
||||
cmd: Some(cmd),
|
||||
pool: per_thread.clone(),
|
||||
pool_parent: self.clone(),
|
||||
secondary: secondary,
|
||||
device: self.device.clone(),
|
||||
},
|
||||
dummy_avoid_send_sync: PhantomData,
|
||||
});
|
||||
inner: StandardCommandPoolAlloc {
|
||||
cmd: Some(cmd),
|
||||
pool: per_thread.clone(),
|
||||
pool_parent: self.clone(),
|
||||
secondary: secondary,
|
||||
device: self.device.clone(),
|
||||
},
|
||||
dummy_avoid_send_sync: PhantomData,
|
||||
});
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
@ -148,15 +148,15 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
|
||||
|
||||
for cmd in pool_lock.alloc_command_buffers(secondary, num_new)? {
|
||||
output.push(StandardCommandPoolBuilder {
|
||||
inner: StandardCommandPoolAlloc {
|
||||
cmd: Some(cmd),
|
||||
pool: per_thread.clone(),
|
||||
pool_parent: self.clone(),
|
||||
secondary: secondary,
|
||||
device: self.device.clone(),
|
||||
},
|
||||
dummy_avoid_send_sync: PhantomData,
|
||||
});
|
||||
inner: StandardCommandPoolAlloc {
|
||||
cmd: Some(cmd),
|
||||
pool: per_thread.clone(),
|
||||
pool_parent: self.clone(),
|
||||
secondary: secondary,
|
||||
device: self.device.clone(),
|
||||
},
|
||||
dummy_avoid_send_sync: PhantomData,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@ -274,12 +274,12 @@ impl Drop for StandardCommandPoolAlloc {

#[cfg(test)]
mod tests {
use VulkanObject;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::StandardCommandPool;
use device::Device;
use std::sync::Arc;
use VulkanObject;

#[test]
fn reuse_command_buffers() {

@ -264,9 +264,7 @@ impl Iterator for UnsafeCommandPoolAllocIter {

#[inline]
fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
self.list
.next()
.map(|cb| UnsafeCommandPoolAlloc(cb))
self.list.next().map(|cb| UnsafeCommandPoolAlloc(cb))
}

#[inline]

@ -7,15 +7,15 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use std::ops::Range;
use VulkanObject;
use buffer::BufferAccess;
use command_buffer::DynamicState;
use descriptor::DescriptorSet;
use pipeline::input_assembly::IndexType;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use pipeline::input_assembly::IndexType;
use smallvec::SmallVec;
use std::ops::Range;
use vk;

/// Keep track of the state of a command buffer builder, so that you don't need to bind objects
|
||||
@ -342,9 +342,9 @@ impl<'s> StateCacherVertexBuffers<'s> {
|
||||
self.state.truncate(self.offset);
|
||||
|
||||
self.first_diff.map(|first| {
|
||||
debug_assert!(first <= self.last_diff);
|
||||
first .. (self.last_diff + 1)
|
||||
})
|
||||
debug_assert!(first <= self.last_diff);
|
||||
first .. (self.last_diff + 1)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -359,9 +359,9 @@ mod tests {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
const EMPTY: [i32; 0] = [];
|
||||
let buf = CpuAccessibleBuffer::from_data(device,
|
||||
BufferUsage::vertex_buffer(),
|
||||
EMPTY.iter()).unwrap();
|
||||
let buf =
|
||||
CpuAccessibleBuffer::from_data(device, BufferUsage::vertex_buffer(), EMPTY.iter())
|
||||
.unwrap();
|
||||
|
||||
let mut cacher = StateCacher::new();
|
||||
|
||||
@ -383,9 +383,9 @@ mod tests {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
const EMPTY: [i32; 0] = [];
|
||||
let buf = CpuAccessibleBuffer::from_data(device,
|
||||
BufferUsage::vertex_buffer(),
|
||||
EMPTY.iter()).unwrap();
|
||||
let buf =
|
||||
CpuAccessibleBuffer::from_data(device, BufferUsage::vertex_buffer(), EMPTY.iter())
|
||||
.unwrap();
|
||||
|
||||
let mut cacher = StateCacher::new();
|
||||
|
||||
@ -417,13 +417,15 @@ mod tests {
|
||||
const EMPTY: [i32; 0] = [];
|
||||
let buf1 = CpuAccessibleBuffer::from_data(device.clone(),
|
||||
BufferUsage::vertex_buffer(),
|
||||
EMPTY.iter()).unwrap();
|
||||
EMPTY.iter())
|
||||
.unwrap();
|
||||
let buf2 = CpuAccessibleBuffer::from_data(device.clone(),
|
||||
BufferUsage::vertex_buffer(),
|
||||
EMPTY.iter()).unwrap();
|
||||
let buf3 = CpuAccessibleBuffer::from_data(device,
|
||||
BufferUsage::vertex_buffer(),
|
||||
EMPTY.iter()).unwrap();
|
||||
EMPTY.iter())
|
||||
.unwrap();
|
||||
let buf3 =
|
||||
CpuAccessibleBuffer::from_data(device, BufferUsage::vertex_buffer(), EMPTY.iter())
|
||||
.unwrap();
|
||||
|
||||
let mut cacher = StateCacher::new();
|
||||
|
||||
@ -181,7 +181,7 @@ impl<'a> SubmitPresentBuilder<'a> {

// TODO: AMD driver initially didn't write the results ; check that it's been fixed
//for result in results {
//try!(check_errors(result));
//try!(check_errors(result));
//}

Ok(())

@ -401,12 +401,9 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
/// any existing resource usage.
|
||||
#[inline]
|
||||
pub unsafe fn from_unsafe_cmd(cmd: UnsafeCommandBufferBuilder<P>, is_secondary: bool,
|
||||
inside_render_pass: bool) -> SyncCommandBufferBuilder<P> {
|
||||
let latest_render_pass_enter = if inside_render_pass {
|
||||
Some(0)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
inside_render_pass: bool)
|
||||
-> SyncCommandBufferBuilder<P> {
|
||||
let latest_render_pass_enter = if inside_render_pass { Some(0) } else { None };
|
||||
|
||||
SyncCommandBufferBuilder {
|
||||
inner: cmd,
|
||||
@ -465,10 +462,10 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
// in when the command starts, and the image layout that the image will be transitionned to
|
||||
// during the command. When it comes to buffers, you should pass `Undefined` for both.
|
||||
pub(super) fn prev_cmd_resource(&mut self, resource_ty: KeyTy, resource_index: usize,
|
||||
exclusive: bool, stages: PipelineStages,
|
||||
access: AccessFlagBits, start_layout: ImageLayout,
|
||||
end_layout: ImageLayout)
|
||||
-> Result<(), SyncCommandBufferBuilderError> {
|
||||
exclusive: bool, stages: PipelineStages,
|
||||
access: AccessFlagBits, start_layout: ImageLayout,
|
||||
end_layout: ImageLayout)
|
||||
-> Result<(), SyncCommandBufferBuilderError> {
|
||||
// Anti-dumbness checks.
|
||||
debug_assert!(exclusive || start_layout == end_layout);
|
||||
debug_assert!(access.is_compatible_with(&stages));
|
||||
@ -494,7 +491,6 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
// throughout the function.
|
||||
match self.resources.entry(key) {
|
||||
|
||||
|
||||
// Situation where this resource was used before in this command buffer.
|
||||
Entry::Occupied(entry) => {
|
||||
// `collision_cmd_id` contains the ID of the command that we are potentially
|
||||
@ -524,7 +520,9 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
{
|
||||
let mut commands_lock = self.commands.lock().unwrap();
|
||||
let start = commands_lock.first_unflushed;
|
||||
let end = if let Some(rp_enter) = commands_lock.latest_render_pass_enter {
|
||||
let end = if let Some(rp_enter) = commands_lock
|
||||
.latest_render_pass_enter
|
||||
{
|
||||
rp_enter
|
||||
} else {
|
||||
latest_command_id
|
||||
@ -533,20 +531,23 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
let cmd1 = &commands_lock.commands[collision_cmd_id];
|
||||
let cmd2 = &commands_lock.commands[latest_command_id];
|
||||
return Err(SyncCommandBufferBuilderError::Conflict {
|
||||
command1_name: cmd1.name(),
|
||||
command1_param: match entry_key_resource_ty {
|
||||
KeyTy::Buffer => cmd1.buffer_name(entry_key_resource_index),
|
||||
KeyTy::Image => cmd1.image_name(entry_key_resource_index),
|
||||
},
|
||||
command1_offset: collision_cmd_id,
|
||||
command1_name: cmd1.name(),
|
||||
command1_param: match entry_key_resource_ty {
|
||||
KeyTy::Buffer => cmd1.buffer_name(entry_key_resource_index),
|
||||
KeyTy::Image =>
|
||||
cmd1.image_name(entry_key_resource_index),
|
||||
},
|
||||
command1_offset: collision_cmd_id,
|
||||
|
||||
command2_name: cmd2.name(),
|
||||
command2_param: match resource_ty {
|
||||
KeyTy::Buffer => cmd2.buffer_name(resource_index),
|
||||
KeyTy::Image => cmd2.image_name(resource_index),
|
||||
},
|
||||
command2_offset: latest_command_id,
|
||||
});
|
||||
command2_name: cmd2.name(),
|
||||
command2_param: match resource_ty {
|
||||
KeyTy::Buffer =>
|
||||
cmd2.buffer_name(resource_index),
|
||||
KeyTy::Image =>
|
||||
cmd2.image_name(resource_index),
|
||||
},
|
||||
command2_offset: latest_command_id,
|
||||
});
|
||||
}
|
||||
for command in &mut commands_lock.commands[start .. end] {
|
||||
command.send(&mut self.inner);
|
||||
@ -617,7 +618,6 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
}
|
||||
},
|
||||
|
||||
|
||||
// Situation where this is the first time we use this resource in this command buffer.
|
||||
Entry::Vacant(entry) => {
|
||||
// We need to perform some tweaks if the initial layout requirement of the image
|
||||
@ -689,7 +689,7 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
{
|
||||
let mut commands_lock = self.commands.lock().unwrap();
|
||||
debug_assert!(commands_lock.latest_render_pass_enter.is_none() ||
|
||||
self.pending_barrier.is_empty());
|
||||
self.pending_barrier.is_empty());
|
||||
|
||||
// The commands that haven't been sent to the inner command buffer yet need to be sent.
|
||||
unsafe {
|
||||
@ -718,19 +718,19 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
barrier.add_image_memory_barrier(img,
|
||||
0 .. img.mipmap_levels(),
|
||||
0 .. img.dimensions().array_layers(),
|
||||
state.stages,
|
||||
state.access,
|
||||
PipelineStages {
|
||||
top_of_pipe: true,
|
||||
..PipelineStages::none()
|
||||
},
|
||||
AccessFlagBits::none(),
|
||||
true,
|
||||
None, // TODO: queue transfers?
|
||||
state.current_layout,
|
||||
requested_layout);
|
||||
0 .. img.mipmap_levels(),
|
||||
0 .. img.dimensions().array_layers(),
|
||||
state.stages,
|
||||
state.access,
|
||||
PipelineStages {
|
||||
top_of_pipe: true,
|
||||
..PipelineStages::none()
|
||||
},
|
||||
AccessFlagBits::none(),
|
||||
true,
|
||||
None, // TODO: queue transfers?
|
||||
state.current_layout,
|
||||
requested_layout);
|
||||
|
||||
state.exclusive_any = true;
|
||||
state.current_layout = requested_layout;
|
||||
@ -1123,13 +1123,17 @@ impl<P> SyncCommandBuffer<P> {
|
||||
KeyTy::Buffer => {
|
||||
let cmd = &commands_lock[command_id];
|
||||
let buf = cmd.buffer(resource_index);
|
||||
unsafe { buf.unlock(); }
|
||||
unsafe {
|
||||
buf.unlock();
|
||||
}
|
||||
},
|
||||
|
||||
KeyTy::Image => {
|
||||
let cmd = &commands_lock[command_id];
|
||||
let img = cmd.image(resource_index);
|
||||
unsafe { img.unlock(); }
|
||||
unsafe {
|
||||
img.unlock();
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -1203,9 +1207,9 @@ impl<P> SyncCommandBuffer<P> {
|
||||
///
|
||||
/// > **Note**: Suitable when implementing the `CommandBuffer` trait.
|
||||
#[inline]
|
||||
pub fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
|
||||
queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
pub fn check_image_access(
|
||||
&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
// TODO: check the queue family
|
||||
|
||||
if let Some(value) = self.resources.get(&CbKey::ImageRef(image)) {
|
||||
|
@ -312,9 +312,12 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
}
|
||||
|
||||
unsafe fn send(&mut self, out: &mut UnsafeCommandBufferBuilder<P>) {
|
||||
out.blit_image(self.source.as_ref().unwrap(), self.source_layout,
|
||||
self.destination.as_ref().unwrap(), self.destination_layout,
|
||||
self.regions.take().unwrap(), self.filter);
|
||||
out.blit_image(self.source.as_ref().unwrap(),
|
||||
self.source_layout,
|
||||
self.destination.as_ref().unwrap(),
|
||||
self.destination_layout,
|
||||
self.regions.take().unwrap(),
|
||||
self.filter);
|
||||
}
|
||||
|
||||
fn into_final_command(mut self: Box<Self>) -> Box<FinalCommand + Send + Sync> {
|
||||
@ -417,21 +420,26 @@ impl<P> SyncCommandBufferBuilder<P> {
|
||||
|
||||
impl<P, I, R> Command<P> for Cmd<I, R>
|
||||
where I: ImageAccess + Send + Sync + 'static,
|
||||
R: Iterator<Item = UnsafeCommandBufferBuilderColorImageClear> + Send + Sync + 'static
|
||||
R: Iterator<Item = UnsafeCommandBufferBuilderColorImageClear>
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static
|
||||
{
|
||||
fn name(&self) -> &'static str {
|
||||
"vkCmdClearColorImage"
|
||||
}
|
||||
|
||||
unsafe fn send(&mut self, out: &mut UnsafeCommandBufferBuilder<P>) {
|
||||
out.clear_color_image(self.image.as_ref().unwrap(), self.layout, self.color,
|
||||
out.clear_color_image(self.image.as_ref().unwrap(),
|
||||
self.layout,
|
||||
self.color,
|
||||
self.regions.take().unwrap());
|
||||
}
|
||||
|
||||
fn into_final_command(mut self: Box<Self>) -> Box<FinalCommand + Send + Sync> {
|
||||
struct Fin<I>(I);
|
||||
impl<I> FinalCommand for Fin<I>
|
||||
where I: ImageAccess + Send + Sync + 'static,
|
||||
where I: ImageAccess + Send + Sync + 'static
|
||||
{
|
||||
fn image(&self, num: usize) -> &ImageAccess {
|
||||
assert_eq!(num, 0);
|
||||
@ -1661,7 +1669,8 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
|
||||
fn buffer_name(&self, mut num: usize) -> Cow<'static, str> {
|
||||
for (set_num, set) in self.inner.iter().enumerate() {
|
||||
if let Some(buf) = set.buffer(num) {
|
||||
return format!("Buffer bound to descriptor {} of set {}", buf.1, set_num).into();
|
||||
return format!("Buffer bound to descriptor {} of set {}", buf.1, set_num)
|
||||
.into();
|
||||
}
|
||||
num -= set.num_buffers();
|
||||
}
|
||||
@ -1681,7 +1690,8 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
|
||||
fn image_name(&self, mut num: usize) -> Cow<'static, str> {
|
||||
for (set_num, set) in self.inner.iter().enumerate() {
|
||||
if let Some(img) = set.image(num) {
|
||||
return format!("Image bound to descriptor {} of set {}", img.1, set_num).into();
|
||||
return format!("Image bound to descriptor {} of set {}", img.1, set_num)
|
||||
.into();
|
||||
}
|
||||
num -= set.num_images();
|
||||
}
|
||||
@ -1693,7 +1703,8 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
|
||||
let mut all_buffers = Vec::new();
|
||||
for ds in self.inner.iter() {
|
||||
for buf_num in 0 .. ds.num_buffers() {
|
||||
let desc = ds.descriptor(ds.buffer(buf_num).unwrap().1 as usize).unwrap();
|
||||
let desc = ds.descriptor(ds.buffer(buf_num).unwrap().1 as usize)
|
||||
.unwrap();
|
||||
let write = !desc.readonly;
|
||||
let (stages, access) = desc.pipeline_stages_and_access();
|
||||
all_buffers.push((write, stages, access));
|
||||
@ -1731,7 +1742,7 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
|
||||
ignore_me_hack = true;
|
||||
image_view.descriptor_set_input_attachment_layout()
|
||||
},
|
||||
_ => panic!("Tried to bind an image to a non-image descriptor")
|
||||
_ => panic!("Tried to bind an image to a non-image descriptor"),
|
||||
};
|
||||
all_images.push((write, stages, access, layout, ignore_me_hack));
|
||||
}
|
||||
@ -1739,30 +1750,33 @@ impl<'b, P> SyncCommandBufferBuilderBindDescriptorSets<'b, P> {
|
||||
all_images
|
||||
};
|
||||
|
||||
self.builder
|
||||
.append_command(Cmd {
|
||||
inner: self.inner,
|
||||
graphics,
|
||||
pipeline_layout,
|
||||
first_binding,
|
||||
dynamic_offsets: Some(dynamic_offsets),
|
||||
});
|
||||
self.builder.append_command(Cmd {
|
||||
inner: self.inner,
|
||||
graphics,
|
||||
pipeline_layout,
|
||||
first_binding,
|
||||
dynamic_offsets: Some(dynamic_offsets),
|
||||
});
|
||||
|
||||
for (n, (write, stages, access)) in all_buffers.into_iter().enumerate() {
|
||||
self.builder
|
||||
.prev_cmd_resource(KeyTy::Buffer,
|
||||
n, write, stages, access,
|
||||
n,
|
||||
write,
|
||||
stages,
|
||||
access,
|
||||
ImageLayout::Undefined,
|
||||
ImageLayout::Undefined)?;
|
||||
}
|
||||
|
||||
for (n, (write, stages, access, layout, ignore_me_hack)) in all_images.into_iter().enumerate() {
|
||||
if ignore_me_hack { continue; }
|
||||
for (n, (write, stages, access, layout, ignore_me_hack)) in
|
||||
all_images.into_iter().enumerate()
|
||||
{
|
||||
if ignore_me_hack {
|
||||
continue;
|
||||
}
|
||||
self.builder
|
||||
.prev_cmd_resource(KeyTy::Image,
|
||||
n, write, stages, access,
|
||||
layout,
|
||||
layout)?;
|
||||
.prev_cmd_resource(KeyTy::Image, n, write, stages, access, layout, layout)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -1824,12 +1838,11 @@ impl<'a, P> SyncCommandBufferBuilderBindVertexBuffer<'a, P> {
|
||||
|
||||
let num_buffers = self.buffers.len();
|
||||
|
||||
self.builder
|
||||
.append_command(Cmd {
|
||||
first_binding,
|
||||
inner: Some(self.inner),
|
||||
buffers: self.buffers,
|
||||
});
|
||||
self.builder.append_command(Cmd {
|
||||
first_binding,
|
||||
inner: Some(self.inner),
|
||||
buffers: self.buffers,
|
||||
});
|
||||
|
||||
for n in 0 .. num_buffers {
|
||||
self.builder
|
||||
@ -1867,7 +1880,8 @@ impl<'a, P> SyncCommandBufferBuilderExecuteCommands<'a, P> {
|
||||
where C: CommandBuffer + Send + Sync + 'static
|
||||
{
|
||||
self.inner.add(&command_buffer);
|
||||
self.command_buffers.push(Box::new(command_buffer) as Box<_>);
|
||||
self.command_buffers
|
||||
.push(Box::new(command_buffer) as Box<_>);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@ -1894,11 +1908,10 @@ impl<'a, P> SyncCommandBufferBuilderExecuteCommands<'a, P> {
|
||||
}
|
||||
}
|
||||
|
||||
self.builder
|
||||
.append_command(Cmd {
|
||||
inner: Some(self.inner),
|
||||
command_buffers: self.command_buffers,
|
||||
});
|
||||
self.builder.append_command(Cmd {
|
||||
inner: Some(self.inner),
|
||||
command_buffers: self.command_buffers,
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
}

@ -34,13 +34,11 @@ fn basic_conflict() {
let pool = Device::standard_command_pool(&device, queue.family());
let mut sync = SyncCommandBufferBuilder::new(&pool, Kind::primary(), Flags::None).unwrap();

let buf = CpuAccessibleBuffer::from_data(device,
BufferUsage::all(),
0u32).unwrap();
let buf = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), 0u32).unwrap();

match sync.copy_buffer(buf.clone(), buf.clone(), iter::once((0, 0, 4))) {
Err(SyncCommandBufferBuilderError::Conflict { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
}

@ -131,9 +131,8 @@ impl
#[inline]
pub fn secondary(occlusion_query: KindOcclusionQuery,
query_statistics_flags: QueryPipelineStatisticFlags)
-> Kind<Arc<RenderPass<EmptySinglePassRenderPassDesc>>,
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
{
-> Kind<Arc<RenderPass<EmptySinglePassRenderPassDesc>>,
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
Kind::Secondary {
render_pass: None,
occlusion_query,
@ -267,20 +266,27 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
};
|
||||
(rp, sp, fb)
|
||||
},
|
||||
_ => (0, 0, 0)
|
||||
_ => (0, 0, 0),
|
||||
};
|
||||
|
||||
let (oqe, qf, ps) = match kind {
|
||||
Kind::Secondary { occlusion_query, query_statistics_flags, .. } => {
|
||||
Kind::Secondary {
|
||||
occlusion_query,
|
||||
query_statistics_flags,
|
||||
..
|
||||
} => {
|
||||
let ps: vk::QueryPipelineStatisticFlagBits = query_statistics_flags.into();
|
||||
debug_assert!(ps == 0 ||
|
||||
alloc.device().enabled_features().pipeline_statistics_query);
|
||||
alloc.device().enabled_features().pipeline_statistics_query);
|
||||
|
||||
let (oqe, qf) = match occlusion_query {
|
||||
KindOcclusionQuery::Allowed { control_precise_allowed } => {
|
||||
debug_assert!(alloc.device().enabled_features().inherited_queries);
|
||||
let qf = if control_precise_allowed { vk::QUERY_CONTROL_PRECISE_BIT }
|
||||
else { 0 };
|
||||
let qf = if control_precise_allowed {
|
||||
vk::QUERY_CONTROL_PRECISE_BIT
|
||||
} else {
|
||||
0
|
||||
};
|
||||
(vk::TRUE, qf)
|
||||
},
|
||||
KindOcclusionQuery::Forbidden => (0, 0),
|
||||
@ -288,7 +294,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
|
||||
(oqe, qf, ps)
|
||||
},
|
||||
_ => (0, 0, 0)
|
||||
_ => (0, 0, 0),
|
||||
};
|
||||
|
||||
let inheritance = vk::CommandBufferInheritanceInfo {
|
||||
@ -387,22 +393,28 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
vk::ClearValue { color: vk::ClearColorValue { uint32: val } }
|
||||
},
|
||||
ClearValue::Depth(val) => {
|
||||
vk::ClearValue { depthStencil: vk::ClearDepthStencilValue {
|
||||
depth: val,
|
||||
stencil: 0,
|
||||
}}
|
||||
vk::ClearValue {
|
||||
depthStencil: vk::ClearDepthStencilValue {
|
||||
depth: val,
|
||||
stencil: 0,
|
||||
},
|
||||
}
|
||||
},
|
||||
ClearValue::Stencil(val) => {
|
||||
vk::ClearValue { depthStencil: vk::ClearDepthStencilValue {
|
||||
depth: 0.0,
|
||||
stencil: val,
|
||||
}}
|
||||
vk::ClearValue {
|
||||
depthStencil: vk::ClearDepthStencilValue {
|
||||
depth: 0.0,
|
||||
stencil: val,
|
||||
},
|
||||
}
|
||||
},
|
||||
ClearValue::DepthStencil((depth, stencil)) => {
|
||||
vk::ClearValue { depthStencil: vk::ClearDepthStencilValue {
|
||||
depth: depth,
|
||||
stencil: stencil,
|
||||
}}
|
||||
vk::ClearValue {
|
||||
depthStencil: vk::ClearDepthStencilValue {
|
||||
depth: depth,
|
||||
stencil: stencil,
|
||||
},
|
||||
}
|
||||
},
|
||||
})
|
||||
.collect();
|
||||
@ -563,31 +575,33 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
{
|
||||
debug_assert!(filter == Filter::Nearest || !source.format().ty().is_depth_and_or_stencil());
|
||||
debug_assert!((source.format().ty() == FormatTy::Uint) ==
|
||||
(destination.format().ty() == FormatTy::Uint));
|
||||
(destination.format().ty() == FormatTy::Uint));
|
||||
debug_assert!((source.format().ty() == FormatTy::Sint) ==
|
||||
(destination.format().ty() == FormatTy::Sint));
|
||||
(destination.format().ty() == FormatTy::Sint));
|
||||
debug_assert!(source.format() == destination.format() ||
|
||||
!source.format().ty().is_depth_and_or_stencil());
|
||||
!source.format().ty().is_depth_and_or_stencil());
|
||||
|
||||
debug_assert_eq!(source.samples(), 1);
|
||||
let source = source.inner();
|
||||
debug_assert!(source.image.supports_blit_source());
|
||||
debug_assert!(source.image.usage_transfer_source());
|
||||
debug_assert!(source_layout == ImageLayout::General ||
|
||||
source_layout == ImageLayout::TransferSrcOptimal);
|
||||
source_layout == ImageLayout::TransferSrcOptimal);
|
||||
|
||||
debug_assert_eq!(destination.samples(), 1);
|
||||
let destination = destination.inner();
|
||||
debug_assert!(destination.image.supports_blit_destination());
|
||||
debug_assert!(destination.image.usage_transfer_destination());
|
||||
debug_assert!(destination_layout == ImageLayout::General ||
|
||||
destination_layout == ImageLayout::TransferDstOptimal);
|
||||
destination_layout == ImageLayout::TransferDstOptimal);
|
||||
|
||||
let regions: SmallVec<[_; 8]> = regions
|
||||
.filter_map(|blit| {
|
||||
// TODO: not everything is checked here
|
||||
debug_assert!(blit.source_base_array_layer + blit.layer_count <= source.num_layers as u32);
|
||||
debug_assert!(blit.destination_base_array_layer + blit.layer_count <= destination.num_layers as u32);
|
||||
debug_assert!(blit.source_base_array_layer + blit.layer_count <=
|
||||
source.num_layers as u32);
|
||||
debug_assert!(blit.destination_base_array_layer + blit.layer_count <=
|
||||
destination.num_layers as u32);
|
||||
debug_assert!(blit.source_mip_level < destination.num_mipmap_levels as u32);
|
||||
debug_assert!(blit.destination_mip_level < destination.num_mipmap_levels as u32);
|
||||
|
||||
@ -612,12 +626,13 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
x: blit.source_bottom_right[0],
|
||||
y: blit.source_bottom_right[1],
|
||||
z: blit.source_bottom_right[2],
|
||||
}
|
||||
},
|
||||
],
|
||||
dstSubresource: vk::ImageSubresourceLayers {
|
||||
aspectMask: blit.aspect.to_vk_bits(),
|
||||
mipLevel: blit.destination_mip_level,
|
||||
baseArrayLayer: blit.destination_base_array_layer + destination.first_layer as u32,
|
||||
baseArrayLayer: blit.destination_base_array_layer +
|
||||
destination.first_layer as u32,
|
||||
layerCount: blit.layer_count,
|
||||
},
|
||||
dstOffsets: [
|
||||
@ -630,7 +645,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
x: blit.destination_bottom_right[0],
|
||||
y: blit.destination_bottom_right[1],
|
||||
z: blit.destination_bottom_right[2],
|
||||
}
|
||||
},
|
||||
],
|
||||
})
|
||||
})
|
||||
@ -642,9 +657,14 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdBlitImage(cmd, source.image.internal_object(), source_layout as u32,
|
||||
destination.image.internal_object(), destination_layout as u32,
|
||||
regions.len() as u32, regions.as_ptr(), filter as u32);
|
||||
vk.CmdBlitImage(cmd,
|
||||
source.image.internal_object(),
|
||||
source_layout as u32,
|
||||
destination.image.internal_object(),
|
||||
destination_layout as u32,
|
||||
regions.len() as u32,
|
||||
regions.as_ptr(),
|
||||
filter as u32);
|
||||
}
|
||||
|
||||
// TODO: missing structs
|
||||
@ -681,13 +701,12 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
R: Iterator<Item = UnsafeCommandBufferBuilderColorImageClear>
|
||||
{
|
||||
debug_assert!(image.format().ty() == FormatTy::Float ||
|
||||
image.format().ty() == FormatTy::Uint ||
|
||||
image.format().ty() == FormatTy::Sint);
|
||||
image.format().ty() == FormatTy::Uint ||
|
||||
image.format().ty() == FormatTy::Sint);
|
||||
|
||||
let image = image.inner();
|
||||
debug_assert!(image.image.usage_transfer_destination());
|
||||
debug_assert!(layout == ImageLayout::General ||
|
||||
layout == ImageLayout::TransferDstOptimal);
|
||||
debug_assert!(layout == ImageLayout::General || layout == ImageLayout::TransferDstOptimal);
|
||||
|
||||
let color = match color {
|
||||
ClearValue::Float(val) => {
|
||||
@ -706,20 +725,22 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
|
||||
let regions: SmallVec<[_; 8]> = regions
|
||||
.filter_map(|region| {
|
||||
debug_assert!(region.layer_count + region.base_array_layer <= image.num_layers as u32);
|
||||
debug_assert!(region.level_count + region.base_mip_level <= image.num_mipmap_levels as u32);
|
||||
debug_assert!(region.layer_count + region.base_array_layer <=
|
||||
image.num_layers as u32);
|
||||
debug_assert!(region.level_count + region.base_mip_level <=
|
||||
image.num_mipmap_levels as u32);
|
||||
|
||||
if region.layer_count == 0 || region.level_count == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(vk::ImageSubresourceRange {
|
||||
aspectMask: vk::IMAGE_ASPECT_COLOR_BIT,
|
||||
baseMipLevel: region.base_mip_level + image.first_mipmap_level as u32,
|
||||
levelCount: region.level_count,
|
||||
baseArrayLayer: region.base_array_layer + image.first_layer as u32,
|
||||
layerCount: region.layer_count,
|
||||
})
|
||||
aspectMask: vk::IMAGE_ASPECT_COLOR_BIT,
|
||||
baseMipLevel: region.base_mip_level + image.first_mipmap_level as u32,
|
||||
levelCount: region.level_count,
|
||||
baseArrayLayer: region.base_array_layer + image.first_layer as u32,
|
||||
layerCount: region.layer_count,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
@ -799,7 +820,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let destination = destination.inner();
|
||||
debug_assert!(destination.image.usage_transfer_destination());
|
||||
debug_assert!(destination_layout == ImageLayout::General ||
|
||||
destination_layout == ImageLayout::TransferDstOptimal);
|
||||
destination_layout == ImageLayout::TransferDstOptimal);
|
||||
|
||||
let regions: SmallVec<[_; 8]> = regions
|
||||
.map(|copy| {
|
||||
@ -813,7 +834,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
imageSubresource: vk::ImageSubresourceLayers {
|
||||
aspectMask: copy.image_aspect.to_vk_bits(),
|
||||
mipLevel: copy.image_mip_level + destination.first_mipmap_level as u32,
|
||||
baseArrayLayer: copy.image_base_array_layer + destination.first_layer as u32,
|
||||
baseArrayLayer: copy.image_base_array_layer +
|
||||
destination.first_layer as u32,
|
||||
layerCount: copy.image_layer_count,
|
||||
},
|
||||
imageOffset: vk::Offset3D {
|
||||
@ -859,7 +881,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let source = source.inner();
|
||||
debug_assert!(source.image.usage_transfer_source());
|
||||
debug_assert!(source_layout == ImageLayout::General ||
|
||||
source_layout == ImageLayout::TransferSrcOptimal);
|
||||
source_layout == ImageLayout::TransferSrcOptimal);
|
||||
|
||||
let destination = destination.inner();
|
||||
debug_assert!(destination.offset < destination.buffer.size());
|
||||
@ -911,20 +933,23 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
/// Calls `vkCmdCopyQueryPoolResults` on the builder.
|
||||
#[inline]
|
||||
pub unsafe fn copy_query_pool_results(&mut self, queries: UnsafeQueriesRange,
|
||||
destination: &BufferAccess, stride: usize)
|
||||
{
|
||||
destination: &BufferAccess, stride: usize) {
|
||||
let destination = destination.inner();
|
||||
debug_assert!(destination.offset < destination.buffer.size());
|
||||
debug_assert!(destination.buffer.usage_transfer_destination());
|
||||
|
||||
let flags = 0; // FIXME:
|
||||
let flags = 0; // FIXME:
|
||||
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdCopyQueryPoolResults(cmd, queries.pool().internal_object(), queries.first_index(),
|
||||
queries.count(), destination.buffer.internal_object(),
|
||||
vk.CmdCopyQueryPoolResults(cmd,
|
||||
queries.pool().internal_object(),
|
||||
queries.first_index(),
|
||||
queries.count(),
|
||||
destination.buffer.internal_object(),
|
||||
destination.offset as vk::DeviceSize,
|
||||
stride as vk::DeviceSize, flags);
|
||||
stride as vk::DeviceSize,
|
||||
flags);
|
||||
}
|
||||
|
||||
/// Calls `vkCmdDispatch` on the builder.
@ -1167,7 +1192,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn reset_query_pool(&mut self, queries: UnsafeQueriesRange) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdResetQueryPool(cmd, queries.pool().internal_object(), queries.first_index(),
vk.CmdResetQueryPool(cmd,
queries.pool().internal_object(),
queries.first_index(),
queries.count());
}

@ -1338,7 +1365,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn write_timestamp(&mut self, query: UnsafeQuery, stages: PipelineStages) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdWriteTimestamp(cmd, stages.into_vulkan_bits(), query.pool().internal_object(),
vk.CmdWriteTimestamp(cmd,
stages.into_vulkan_bits(),
query.pool().internal_object(),
query.index());
}
}
@ -1706,7 +1735,8 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
image: image.image.internal_object(),
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmaps.start + image.first_mipmap_level as u32,
baseMipLevel: mipmaps.start +
image.first_mipmap_level as u32,
levelCount: mipmaps.end - mipmaps.start,
baseArrayLayer: layers.start + image.first_layer as u32,
layerCount: layers.end - layers.start,

@ -54,8 +54,7 @@ pub unsafe trait CommandBuffer: DeviceOwned {
///
/// If you call this function, then you should call `unlock` afterwards.
// TODO: require `&mut self` instead, but this has some consequences on other parts of the lib
fn lock_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError>;
fn lock_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError>;

/// Unlocks the command buffer. Should be called once for each call to `lock_submit`.
///
@ -171,8 +170,7 @@ unsafe impl<T> CommandBuffer for T
}

#[inline]
fn lock_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError> {
fn lock_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
(**self).lock_submit(future, queue)
}

@ -391,7 +389,7 @@ impl error::Error for CommandBufferExecError {
fn cause(&self) -> Option<&error::Error> {
match *self {
CommandBufferExecError::AccessError(ref err) => Some(err),
_ => None
_ => None,
}
}
}

@ -27,10 +27,11 @@ use sampler::Filter;
///
pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32; 3],
source_bottom_right: [i32; 3], source_base_array_layer: u32,
source_mip_level: u32, destination: &D, destination_top_left: [i32; 3],
destination_bottom_right: [i32; 3], destination_base_array_layer: u32,
destination_mip_level: u32, layer_count: u32, filter: Filter)
-> Result<(), CheckBlitImageError>
source_mip_level: u32, destination: &D,
destination_top_left: [i32; 3], destination_bottom_right: [i32; 3],
destination_base_array_layer: u32, destination_mip_level: u32,
layer_count: u32, filter: Filter)
-> Result<(), CheckBlitImageError>
where S: ?Sized + ImageAccess,
D: ?Sized + ImageAccess
{
@ -77,12 +78,12 @@ pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32

let types_should_be_same =
source_format_ty == FormatTy::Uint || destination_format_ty == FormatTy::Uint ||
source_format_ty == FormatTy::Sint || destination_format_ty == FormatTy::Sint;
source_format_ty == FormatTy::Sint || destination_format_ty == FormatTy::Sint;
if types_should_be_same && (source_format_ty != destination_format_ty) {
return Err(CheckBlitImageError::IncompatibleFormatsTypes {
source_format_ty: source.format().ty(),
destination_format_ty: destination.format().ty()
});
source_format_ty: source.format().ty(),
destination_format_ty: destination.format().ty(),
});
}

let source_dimensions = match source.dimensions().mipmap_dimensions(source_mip_level) {
@ -90,7 +91,9 @@ pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32
None => return Err(CheckBlitImageError::SourceCoordinatesOutOfRange),
};

let destination_dimensions = match destination.dimensions().mipmap_dimensions(destination_mip_level) {
let destination_dimensions = match destination
.dimensions()
.mipmap_dimensions(destination_mip_level) {
Some(d) => d,
None => return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange),
};
@ -127,27 +130,39 @@ pub fn check_blit_image<S, D>(device: &Device, source: &S, source_top_left: [i32
return Err(CheckBlitImageError::SourceCoordinatesOutOfRange);
}

if destination_top_left[0] < 0 || destination_top_left[0] > destination_dimensions.width() as i32 {
if destination_top_left[0] < 0 ||
destination_top_left[0] > destination_dimensions.width() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}

if destination_top_left[1] < 0 || destination_top_left[1] > destination_dimensions.height() as i32 {
if destination_top_left[1] < 0 ||
destination_top_left[1] > destination_dimensions.height() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}

if destination_top_left[2] < 0 || destination_top_left[2] > destination_dimensions.depth() as i32 {
if destination_top_left[2] < 0 ||
destination_top_left[2] > destination_dimensions.depth() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}

if destination_bottom_right[0] < 0 || destination_bottom_right[0] > destination_dimensions.width() as i32 {
if destination_bottom_right[0] < 0 ||
destination_bottom_right[0] > destination_dimensions.width() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}

if destination_bottom_right[1] < 0 || destination_bottom_right[1] > destination_dimensions.height() as i32 {
if destination_bottom_right[1] < 0 ||
destination_bottom_right[1] > destination_dimensions.height() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}

if destination_bottom_right[2] < 0 || destination_bottom_right[2] > destination_dimensions.depth() as i32 {
if destination_bottom_right[2] < 0 ||
destination_bottom_right[2] > destination_dimensions.depth() as i32
{
return Err(CheckBlitImageError::DestinationCoordinatesOutOfRange);
}
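A minimal illustrative sketch of the rule encoded in the hunk above: a blit may mix format types freely only when neither side is an integer format; if either side is Uint or Sint, the source and destination types must match exactly. The enum below is a simplified stand-in for vulkano's `FormatTy` and this snippet is not part of the commit:

// Illustrative sketch only; `BlitFormatTy` stands in for the real `FormatTy`.
#[derive(Clone, Copy, PartialEq, Debug)]
enum BlitFormatTy { Float, Uint, Sint }

// Mirrors the `types_should_be_same` check from `check_blit_image`.
fn blit_types_compatible(source: BlitFormatTy, destination: BlitFormatTy) -> bool {
    let types_should_be_same = source == BlitFormatTy::Uint || destination == BlitFormatTy::Uint ||
        source == BlitFormatTy::Sint || destination == BlitFormatTy::Sint;
    !(types_should_be_same && source != destination)
}

// e.g. blit_types_compatible(BlitFormatTy::Uint, BlitFormatTy::Float) == false
//      blit_types_compatible(BlitFormatTy::Float, BlitFormatTy::Float) == true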

@ -23,7 +23,7 @@ use image::ImageAccess;
pub fn check_clear_color_image<I>(device: &Device, image: &I, first_layer: u32, num_layers: u32,
first_mipmap: u32, num_mipmaps: u32)
-> Result<(), CheckClearColorImageError>
where I: ?Sized + ImageAccess,
where I: ?Sized + ImageAccess
{
assert_eq!(image.inner().image.device().internal_object(),
device.internal_object());

@ -26,7 +26,7 @@ pub fn check_copy_buffer<S, D, T>(device: &Device, source: &S, destination: &D)
-> Result<CheckCopyBuffer, CheckCopyBufferError>
where S: ?Sized + TypedBufferAccess<Content = T>,
D: ?Sized + TypedBufferAccess<Content = T>,
T: ?Sized,
T: ?Sized
{
assert_eq!(source.inner().buffer.device().internal_object(),
device.internal_object());

@ -41,7 +41,7 @@ pub fn check_copy_buffer_image<B, I, P>(device: &Device, buffer: &B, image: &I,
-> Result<(), CheckCopyBufferImageError>
where I: ?Sized + ImageAccess,
B: ?Sized + TypedBufferAccess<Content = [P]>,
Format: AcceptsPixels<P>, // TODO: use a trait on the image itself instead
Format: AcceptsPixels<P> // TODO: use a trait on the image itself instead
{
let buffer_inner = buffer.inner();
let image_inner = image.inner();
@ -102,9 +102,9 @@ pub fn check_copy_buffer_image<B, I, P>(device: &Device, buffer: &B, image: &I,
let required_len = num_texels as usize * image.format().rate() as usize;
if required_len > buffer.len() {
return Err(CheckCopyBufferImageError::BufferTooSmall {
required_len: required_len,
actual_len: buffer.len(),
});
required_len: required_len,
actual_len: buffer.len(),
});
}
}

@ -170,7 +170,7 @@ impl error::Error for CheckCopyBufferImageError {
CheckCopyBufferImageError::WrongPixelType(ref err) => {
Some(err)
},
_ => None
_ => None,
}
}
}
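As a worked note on the `BufferTooSmall` check above: the required buffer length is the texel count times `format().rate()`, that is, how many elements of `P` each texel occupies. A standalone sketch with hypothetical numbers, not part of the commit:

// Illustrative sketch only: the arithmetic behind CheckCopyBufferImageError::BufferTooSmall.
fn buffer_is_large_enough(num_texels: usize, format_rate: usize, buffer_len: usize) -> bool {
    let required_len = num_texels * format_rate;
    required_len <= buffer_len
}

// Assumed example: a 32x32 single-layer copy of a format stored as four `u8` per texel
// needs 32 * 32 * 4 = 4096 elements, so a 4000-element buffer would be rejected.
// buffer_is_large_enough(32 * 32, 4, 4096) == true
// buffer_is_large_enough(32 * 32, 4, 4000) == false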

@ -17,7 +17,7 @@ use descriptor::pipeline_layout::PipelineLayoutDesc;
pub fn check_descriptor_sets_validity<Pl, D>(pipeline: &Pl, descriptor_sets: &D)
-> Result<(), CheckDescriptorSetsValidityError>
where Pl: ?Sized + PipelineLayoutDesc,
D: ?Sized + DescriptorSetsCollection,
D: ?Sized + DescriptorSetsCollection
{
// What's important is not that the pipeline layout and the descriptor sets *match*. Instead
// what's important is that the descriptor sets are a superset of the pipeline layout. It's not
@ -30,19 +30,20 @@ pub fn check_descriptor_sets_validity<Pl, D>(pipeline: &Pl, descriptor_sets: &D)

let (set_desc, pipeline_desc) = match (set_desc, pipeline_desc) {
(Some(s), Some(p)) => (s, p),
(None, Some(_)) => return Err(CheckDescriptorSetsValidityError::MissingDescriptor {
set_num: set_num,
binding_num: binding_num,
}),
(None, Some(_)) =>
return Err(CheckDescriptorSetsValidityError::MissingDescriptor {
set_num: set_num,
binding_num: binding_num,
}),
(Some(_), None) => continue,
(None, None) => continue,
};

if !set_desc.is_superset_of(&pipeline_desc) {
return Err(CheckDescriptorSetsValidityError::IncompatibleDescriptor {
set_num: set_num,
binding_num: binding_num,
});
set_num: set_num,
binding_num: binding_num,
});
}
}
}

@ -14,13 +14,16 @@ use device::Device;

/// Checks whether the dispatch dimensions are supported by the device.
pub fn check_dispatch(device: &Device, dimensions: [u32; 3]) -> Result<(), CheckDispatchError> {
let max = device.physical_device().limits().max_compute_work_group_count();
let max = device
.physical_device()
.limits()
.max_compute_work_group_count();

if dimensions[0] > max[0] || dimensions[1] > max[1] || dimensions[2] > max[2] {
return Err(CheckDispatchError::UnsupportedDimensions {
requested: dimensions,
max_supported: max,
});
requested: dimensions,
max_supported: max,
});
}

Ok(())
@ -67,7 +70,11 @@ mod tests {
let attempted = [u32::max_value(), u32::max_value(), u32::max_value()];

// Just in case the device is some kind of software implementation.
if device.physical_device().limits().max_compute_work_group_count() == attempted {
if device
.physical_device()
.limits()
.max_compute_work_group_count() == attempted
{
return;
}

@ -75,7 +82,7 @@ mod tests {
Err(validity::CheckDispatchError::UnsupportedDimensions { requested, .. }) => {
assert_eq!(requested, attempted);
},
_ => panic!()
_ => panic!(),
}
}
}
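As a usage note for the `check_dispatch` hunks above: callers are expected to validate the requested work-group count against the device limit before recording a dispatch. A minimal sketch of such a caller, assuming a `device: &Device` from the surrounding application; the error variant and its fields are the ones shown in the diff, while the function name `validate_dispatch` is made up for illustration and is not part of the commit:

// Illustrative sketch only: reacting to CheckDispatchError before recording vkCmdDispatch.
fn validate_dispatch(device: &Device, dimensions: [u32; 3]) -> bool {
    if let Err(CheckDispatchError::UnsupportedDimensions { requested, max_supported }) =
        check_dispatch(device, dimensions)
    {
        println!("dispatch of {:?} exceeds the device limit of {:?}", requested, max_supported);
        return false;
    }
    true
}

// e.g. validate_dispatch(&device, [64, 64, 1]) is expected to return true on typical hardware.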

@ -39,9 +39,9 @@ pub fn check_dynamic_state_validity<Pl>(pipeline: &Pl, state: &DynamicState)
if let Some(ref viewports) = state.viewports {
if viewports.len() != pipeline.num_viewports() as usize {
return Err(CheckDynamicStateValidityError::ViewportsCountMismatch {
expected: pipeline.num_viewports() as usize,
obtained: viewports.len(),
});
expected: pipeline.num_viewports() as usize,
obtained: viewports.len(),
});
}
} else {
return Err(CheckDynamicStateValidityError::ViewportsMissing);
@ -57,9 +57,9 @@ pub fn check_dynamic_state_validity<Pl>(pipeline: &Pl, state: &DynamicState)
if let Some(ref scissors) = state.scissors {
if scissors.len() != pipeline.num_viewports() as usize {
return Err(CheckDynamicStateValidityError::ScissorsCountMismatch {
expected: pipeline.num_viewports() as usize,
obtained: scissors.len(),
});
expected: pipeline.num_viewports() as usize,
obtained: scissors.len(),
});
}
} else {
return Err(CheckDynamicStateValidityError::ScissorsMissing);

@ -70,19 +70,20 @@ impl fmt::Display for CheckFillBufferError {

#[cfg(test)]
mod tests {
use super::*;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use super::*;

#[test]
fn missing_usage() {
let (device, queue) = gfx_dev_and_queue!();
let buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(),
0u32).unwrap();
let buffer =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(), 0u32)
.unwrap();

match check_fill_buffer(&device, &buffer) {
Err(CheckFillBufferError::BufferMissingUsage) => (),
_ => panic!()
_ => panic!(),
}
}

@ -93,7 +94,7 @@ mod tests {
let buffer = CpuAccessibleBuffer::from_data(dev1, BufferUsage::all(), 0u32).unwrap();

assert_should_panic!({
let _ = check_fill_buffer(&dev2, &buffer);
});
let _ = check_fill_buffer(&dev2, &buffer);
});
}
}

@ -40,9 +40,7 @@ pub fn check_index_buffer<B, I>(device: &Device, buffer: &B)
|
||||
|
||||
// TODO: fullDrawIndexUint32 feature
|
||||
|
||||
Ok(CheckIndexBuffer {
|
||||
num_indices: buffer.len(),
|
||||
})
|
||||
Ok(CheckIndexBuffer { num_indices: buffer.len() })
|
||||
}
|
||||
|
||||
/// Information returned if `check_index_buffer` succeeds.
|
||||
@ -89,33 +87,37 @@ impl fmt::Display for CheckIndexBufferError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use buffer::BufferUsage;
|
||||
use buffer::CpuAccessibleBuffer;
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn num_indices() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::index_buffer(),
|
||||
0 .. 500u32).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
|
||||
BufferUsage::index_buffer(),
|
||||
0 .. 500u32)
|
||||
.unwrap();
|
||||
|
||||
match check_index_buffer(&device, &buffer) {
|
||||
Ok(CheckIndexBuffer { num_indices }) => {
|
||||
assert_eq!(num_indices, 500);
|
||||
},
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_usage() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::vertex_buffer(),
|
||||
0 .. 500u32).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
|
||||
BufferUsage::vertex_buffer(),
|
||||
0 .. 500u32)
|
||||
.unwrap();
|
||||
|
||||
match check_index_buffer(&device, &buffer) {
|
||||
Err(CheckIndexBufferError::BufferMissingUsage) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -124,11 +126,10 @@ mod tests {
|
||||
let (dev1, queue) = gfx_dev_and_queue!();
|
||||
let (dev2, _) = gfx_dev_and_queue!();
|
||||
|
||||
let buffer = CpuAccessibleBuffer::from_iter(dev1, BufferUsage::all(),
|
||||
0 .. 500u32).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(dev1, BufferUsage::all(), 0 .. 500u32).unwrap();
|
||||
|
||||
assert_should_panic!({
|
||||
let _ = check_index_buffer(&dev2, &buffer);
|
||||
});
|
||||
let _ = check_index_buffer(&dev2, &buffer);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -9,18 +9,19 @@
|
||||
|
||||
//! Functions that check the validity of commands.
|
||||
|
||||
pub use self::blit_image::{check_blit_image, CheckBlitImageError};
|
||||
pub use self::clear_color_image::{check_clear_color_image, CheckClearColorImageError};
|
||||
pub use self::copy_buffer::{CheckCopyBufferError, check_copy_buffer, CheckCopyBuffer};
|
||||
pub use self::copy_image_buffer::{CheckCopyBufferImageError, check_copy_buffer_image, CheckCopyBufferImageTy};
|
||||
pub use self::descriptor_sets::{check_descriptor_sets_validity, CheckDescriptorSetsValidityError};
|
||||
pub use self::dispatch::{check_dispatch, CheckDispatchError};
|
||||
pub use self::blit_image::{CheckBlitImageError, check_blit_image};
|
||||
pub use self::clear_color_image::{CheckClearColorImageError, check_clear_color_image};
|
||||
pub use self::copy_buffer::{CheckCopyBuffer, CheckCopyBufferError, check_copy_buffer};
|
||||
pub use self::copy_image_buffer::{CheckCopyBufferImageError, CheckCopyBufferImageTy,
|
||||
check_copy_buffer_image};
|
||||
pub use self::descriptor_sets::{CheckDescriptorSetsValidityError, check_descriptor_sets_validity};
|
||||
pub use self::dispatch::{CheckDispatchError, check_dispatch};
|
||||
pub use self::dynamic_state::{CheckDynamicStateValidityError, check_dynamic_state_validity};
|
||||
pub use self::fill_buffer::{CheckFillBufferError, check_fill_buffer};
|
||||
pub use self::index_buffer::{check_index_buffer, CheckIndexBuffer, CheckIndexBufferError};
|
||||
pub use self::push_constants::{check_push_constants_validity, CheckPushConstantsValidityError};
|
||||
pub use self::index_buffer::{CheckIndexBuffer, CheckIndexBufferError, check_index_buffer};
|
||||
pub use self::push_constants::{CheckPushConstantsValidityError, check_push_constants_validity};
|
||||
pub use self::update_buffer::{CheckUpdateBufferError, check_update_buffer};
|
||||
pub use self::vertex_buffers::{check_vertex_buffers, CheckVertexBuffer, CheckVertexBufferError};
|
||||
pub use self::vertex_buffers::{CheckVertexBuffer, CheckVertexBufferError, check_vertex_buffers};
|
||||
|
||||
mod blit_image;
|
||||
mod clear_color_image;
|
||||
|
@ -17,7 +17,7 @@ use descriptor::pipeline_layout::PipelineLayoutPushConstantsCompatible;
|
||||
pub fn check_push_constants_validity<Pl, Pc>(pipeline: &Pl, push_constants: &Pc)
|
||||
-> Result<(), CheckPushConstantsValidityError>
|
||||
where Pl: ?Sized + PipelineLayoutAbstract + PipelineLayoutPushConstantsCompatible<Pc>,
|
||||
Pc: ?Sized,
|
||||
Pc: ?Sized
|
||||
{
|
||||
if !pipeline.is_compatible(push_constants) {
|
||||
return Err(CheckPushConstantsValidityError::IncompatiblePushConstants);
|
||||
|
@ -87,59 +87,66 @@ impl fmt::Display for CheckUpdateBufferError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use buffer::BufferAccess;
|
||||
use buffer::BufferUsage;
|
||||
use buffer::CpuAccessibleBuffer;
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn missing_usage() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(),
|
||||
0u32).unwrap();
|
||||
let buffer =
|
||||
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::vertex_buffer(), 0u32)
|
||||
.unwrap();
|
||||
|
||||
match check_update_buffer(&device, &buffer, &0) {
|
||||
Err(CheckUpdateBufferError::BufferMissingUsage) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn data_too_large() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::transfer_destination(),
|
||||
0 .. 65536).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
|
||||
BufferUsage::transfer_destination(),
|
||||
0 .. 65536)
|
||||
.unwrap();
|
||||
let data = (0 .. 65536).collect::<Vec<u32>>();
|
||||
|
||||
match check_update_buffer(&device, &buffer, &data[..]) {
|
||||
Err(CheckUpdateBufferError::DataTooLarge) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn data_just_large_enough() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::transfer_destination(),
|
||||
(0 .. 100000).map(|_| 0)).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
|
||||
BufferUsage::transfer_destination(),
|
||||
(0 .. 100000).map(|_| 0))
|
||||
.unwrap();
|
||||
let data = (0 .. 65536).map(|_| 0).collect::<Vec<u8>>();
|
||||
|
||||
match check_update_buffer(&device, &buffer, &data[..]) {
|
||||
Ok(_) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrong_alignment() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::transfer_destination(),
|
||||
0 .. 100).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_iter(device.clone(),
|
||||
BufferUsage::transfer_destination(),
|
||||
0 .. 100)
|
||||
.unwrap();
|
||||
let data = (0 .. 30).collect::<Vec<u8>>();
|
||||
|
||||
match check_update_buffer(&device, &buffer.slice(1 .. 50).unwrap(), &data[..]) {
|
||||
Err(CheckUpdateBufferError::WrongAlignment) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -147,11 +154,10 @@ mod tests {
|
||||
fn wrong_device() {
|
||||
let (dev1, queue) = gfx_dev_and_queue!();
|
||||
let (dev2, _) = gfx_dev_and_queue!();
|
||||
let buffer = CpuAccessibleBuffer::from_data(dev1, BufferUsage::all(),
|
||||
0u32).unwrap();
|
||||
let buffer = CpuAccessibleBuffer::from_data(dev1, BufferUsage::all(), 0u32).unwrap();
|
||||
|
||||
assert_should_panic!({
|
||||
let _ = check_update_buffer(&dev2, &buffer, &0);
|
||||
});
|
||||
let _ = check_update_buffer(&dev2, &buffer, &0);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -37,10 +37,10 @@ pub fn check_vertex_buffers<P, V>(pipeline: &P, vertex_buffers: V)
|
||||
}
|
||||
|
||||
Ok(CheckVertexBuffer {
|
||||
vertex_buffers,
|
||||
vertex_count: vertex_count as u32,
|
||||
instance_count: instance_count as u32,
|
||||
})
|
||||
vertex_buffers,
|
||||
vertex_count: vertex_count as u32,
|
||||
instance_count: instance_count as u32,
|
||||
})
|
||||
}
|
||||
|
||||
/// Information returned if `check_vertex_buffer` succeeds.
|
||||
|
@ -43,10 +43,10 @@
|
||||
|
||||
use format::Format;
|
||||
use image::Dimensions;
|
||||
use sync::AccessFlagBits;
|
||||
use sync::PipelineStages;
|
||||
use std::cmp;
|
||||
use std::ops::BitOr;
|
||||
use sync::AccessFlagBits;
|
||||
use sync::PipelineStages;
|
||||
use vk;
|
||||
|
||||
/// Contains the exact description of a single descriptor.
|
||||
@ -114,24 +114,25 @@ impl DescriptorDesc {
|
||||
|
||||
let access = match self.ty {
|
||||
DescriptorDescTy::Sampler => panic!(),
|
||||
DescriptorDescTy::CombinedImageSampler(_) | DescriptorDescTy::Image(_) => {
|
||||
DescriptorDescTy::CombinedImageSampler(_) |
|
||||
DescriptorDescTy::Image(_) => {
|
||||
AccessFlagBits {
|
||||
shader_read: true,
|
||||
shader_write: !self.readonly,
|
||||
.. AccessFlagBits::none()
|
||||
..AccessFlagBits::none()
|
||||
}
|
||||
},
|
||||
DescriptorDescTy::TexelBuffer { .. } => {
|
||||
AccessFlagBits {
|
||||
shader_read: true,
|
||||
shader_write: !self.readonly,
|
||||
.. AccessFlagBits::none()
|
||||
..AccessFlagBits::none()
|
||||
}
|
||||
},
|
||||
DescriptorDescTy::InputAttachment { .. } => {
|
||||
AccessFlagBits {
|
||||
input_attachment_read: true,
|
||||
.. AccessFlagBits::none()
|
||||
..AccessFlagBits::none()
|
||||
}
|
||||
},
|
||||
DescriptorDescTy::Buffer(ref buf) => {
|
||||
@ -139,12 +140,12 @@ impl DescriptorDesc {
|
||||
AccessFlagBits {
|
||||
shader_read: true,
|
||||
shader_write: !self.readonly,
|
||||
.. AccessFlagBits::none()
|
||||
..AccessFlagBits::none()
|
||||
}
|
||||
} else {
|
||||
AccessFlagBits {
|
||||
uniform_read: true,
|
||||
.. AccessFlagBits::none()
|
||||
..AccessFlagBits::none()
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -545,7 +546,7 @@ impl From<ShaderStages> for PipelineStages {
|
||||
geometry_shader: stages.geometry,
|
||||
fragment_shader: stages.fragment,
|
||||
compute_shader: stages.compute,
|
||||
.. PipelineStages::none()
|
||||
..PipelineStages::none()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -7,27 +7,27 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use std::sync::Arc;
|
||||
use crossbeam::sync::SegQueue;
|
||||
use std::sync::Arc;
|
||||
|
||||
use OomError;
|
||||
use buffer::BufferAccess;
|
||||
use buffer::BufferViewRef;
|
||||
use descriptor::descriptor::DescriptorDesc;
|
||||
use descriptor::descriptor_set::persistent::*;
|
||||
use descriptor::descriptor_set::DescriptorSet;
|
||||
use descriptor::descriptor_set::DescriptorSetDesc;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorPool;
|
||||
use descriptor::descriptor_set::DescriptorPool;
|
||||
use descriptor::descriptor_set::DescriptorPoolAlloc;
|
||||
use descriptor::descriptor_set::DescriptorPoolAllocError;
|
||||
use descriptor::descriptor_set::DescriptorSet;
|
||||
use descriptor::descriptor_set::DescriptorSetDesc;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorPool;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSet;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
|
||||
use descriptor::descriptor_set::persistent::*;
|
||||
use descriptor::pipeline_layout::PipelineLayoutAbstract;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use image::ImageViewAccess;
|
||||
use sampler::Sampler;
|
||||
use OomError;
|
||||
|
||||
/// Pool of descriptor sets of a specific capacity and that are automatically reclaimed.
|
||||
///
|
||||
@ -55,7 +55,8 @@ impl<L> FixedSizeDescriptorSetsPool<L> {
|
||||
|
||||
let device = layout.device().clone();
|
||||
|
||||
let set_layout = layout.descriptor_set_layout(set_id)
|
||||
let set_layout = layout
|
||||
.descriptor_set_layout(set_id)
|
||||
.expect("Unable to get the descriptor set layout")
|
||||
.clone();
|
||||
|
||||
@ -89,7 +90,7 @@ impl<L> FixedSizeDescriptorSetsPool<L> {
|
||||
|
||||
/// A descriptor set created from a `FixedSizeDescriptorSetsPool`.
|
||||
pub struct FixedSizeDescriptorSet<L, R> {
|
||||
inner: PersistentDescriptorSet<L, R, LocalPoolAlloc>
|
||||
inner: PersistentDescriptorSet<L, R, LocalPoolAlloc>,
|
||||
}
|
||||
|
||||
unsafe impl<L, R> DescriptorSet for FixedSizeDescriptorSet<L, R>
|
||||
@ -189,17 +190,17 @@ unsafe impl DescriptorPool for LocalPool {
|
||||
if let Some(ref mut current_pool) = self.current_pool {
|
||||
if let Some(already_existing_set) = current_pool.reserve.try_pop() {
|
||||
return Ok(LocalPoolAlloc {
|
||||
actual_alloc: Some(already_existing_set),
|
||||
pool: current_pool.clone(),
|
||||
});
|
||||
actual_alloc: Some(already_existing_set),
|
||||
pool: current_pool.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// If we failed to grab an existing set, that means the current pool is full. Create a
|
||||
// new one of larger capacity.
|
||||
let count = *layout.descriptors_count() * self.next_capacity;
|
||||
let mut new_pool = UnsafeDescriptorPool::new(self.device.clone(), &count,
|
||||
self.next_capacity, false)?;
|
||||
let mut new_pool =
|
||||
UnsafeDescriptorPool::new(self.device.clone(), &count, self.next_capacity, false)?;
|
||||
let alloc = unsafe {
|
||||
match new_pool.alloc((0 .. self.next_capacity).map(|_| layout)) {
|
||||
Ok(iter) => {
|
||||
@ -227,9 +228,9 @@ unsafe impl DescriptorPool for LocalPool {
|
||||
|
||||
self.next_capacity = self.next_capacity.saturating_mul(2);
|
||||
self.current_pool = Some(Arc::new(LocalPoolInner {
|
||||
actual_pool: new_pool,
|
||||
reserve: alloc,
|
||||
}));
|
||||
actual_pool: new_pool,
|
||||
reserve: alloc,
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -278,9 +279,7 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
#[inline]
|
||||
pub fn build(self) -> Result<FixedSizeDescriptorSet<L, R>, PersistentDescriptorSetBuildError> {
|
||||
let inner = self.inner.build_with_pool(&mut self.pool.pool)?;
|
||||
Ok(FixedSizeDescriptorSet {
|
||||
inner: inner,
|
||||
})
|
||||
Ok(FixedSizeDescriptorSet { inner: inner })
|
||||
}
|
||||
|
||||
/// Call this function if the next element of the set is an array in order to set the value of
|
||||
@ -291,20 +290,24 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
/// This function can be called even if the descriptor isn't an array, and it is valid to enter
|
||||
/// the "array", add one element, then leave.
|
||||
#[inline]
|
||||
pub fn enter_array(self) -> Result<FixedSizeDescriptorSetBuilderArray<'a, L, R>, PersistentDescriptorSetError> {
|
||||
pub fn enter_array(
|
||||
self)
|
||||
-> Result<FixedSizeDescriptorSetBuilderArray<'a, L, R>, PersistentDescriptorSetError> {
|
||||
Ok(FixedSizeDescriptorSetBuilderArray {
|
||||
pool: self.pool,
|
||||
inner: self.inner.enter_array()?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.enter_array()?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Skips the current descriptor if it is empty.
|
||||
#[inline]
|
||||
pub fn add_empty(self) -> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
|
||||
pub fn add_empty(
|
||||
self)
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_empty()?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_empty()?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a buffer as the next descriptor.
|
||||
@ -317,13 +320,17 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
///
|
||||
#[inline]
|
||||
pub fn add_buffer<T>(self, buffer: T)
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a, L, (R, PersistentDescriptorSetBuf<T>)>, PersistentDescriptorSetError>
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a,
|
||||
L,
|
||||
(R,
|
||||
PersistentDescriptorSetBuf<T>)>,
|
||||
PersistentDescriptorSetError>
|
||||
where T: BufferAccess
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer(buffer)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer(buffer)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a buffer view as the next descriptor.
|
||||
@ -339,9 +346,9 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
where T: BufferViewRef
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer_view(view)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer_view(view)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds an image view as the next descriptor.
|
||||
@ -354,13 +361,16 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
///
|
||||
#[inline]
|
||||
pub fn add_image<T>(self, image_view: T)
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a, L, (R, PersistentDescriptorSetImg<T>)>, PersistentDescriptorSetError>
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a,
|
||||
L,
|
||||
(R, PersistentDescriptorSetImg<T>)>,
|
||||
PersistentDescriptorSetError>
|
||||
where T: ImageViewAccess
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_image(image_view)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_image(image_view)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds an image view with a sampler as the next descriptor.
|
||||
@ -377,9 +387,9 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
where T: ImageViewAccess
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampled_image(image_view, sampler)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampled_image(image_view, sampler)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a sampler as the next descriptor.
|
||||
@ -392,12 +402,14 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilder<'a, L, R>
|
||||
///
|
||||
#[inline]
|
||||
pub fn add_sampler(self, sampler: Arc<Sampler>)
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a, L, (R, PersistentDescriptorSetSampler)>, PersistentDescriptorSetError>
|
||||
{
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a,
|
||||
L,
|
||||
(R, PersistentDescriptorSetSampler)>,
|
||||
PersistentDescriptorSetError> {
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampler(sampler)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampler(sampler)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -407,13 +419,17 @@ pub struct FixedSizeDescriptorSetBuilderArray<'a, L: 'a, R> {
|
||||
inner: PersistentDescriptorSetBuilderArray<L, R>,
|
||||
}
|
||||
|
||||
impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLayoutAbstract {
|
||||
impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R>
|
||||
where L: PipelineLayoutAbstract
|
||||
{
|
||||
/// Leaves the array. Call this once you added all the elements of the array.
|
||||
pub fn leave_array(self) -> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
|
||||
pub fn leave_array(
|
||||
self)
|
||||
-> Result<FixedSizeDescriptorSetBuilder<'a, L, R>, PersistentDescriptorSetError> {
|
||||
Ok(FixedSizeDescriptorSetBuilder {
|
||||
pool: self.pool,
|
||||
inner: self.inner.leave_array()?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.leave_array()?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a buffer as the next element in the array.
|
||||
@ -429,9 +445,9 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
|
||||
where T: BufferAccess
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilderArray {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer(buffer)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer(buffer)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a buffer view as the next element in the array.
|
||||
@ -447,9 +463,9 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
|
||||
where T: BufferViewRef
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilderArray {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer_view(view)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_buffer_view(view)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds an image view as the next element in the array.
|
||||
@ -465,9 +481,9 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
|
||||
where T: ImageViewAccess
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilderArray {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_image(image_view)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_image(image_view)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds an image view with a sampler as the next element in the array.
|
||||
@ -483,9 +499,9 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
|
||||
where T: ImageViewAccess
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilderArray {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampled_image(image_view, sampler)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampled_image(image_view, sampler)?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a sampler as the next element in the array.
|
||||
@ -500,8 +516,8 @@ impl<'a, L, R> FixedSizeDescriptorSetBuilderArray<'a, L, R> where L: PipelineLay
|
||||
-> Result<FixedSizeDescriptorSetBuilderArray<'a, L, (R, PersistentDescriptorSetSampler)>, PersistentDescriptorSetError>
|
||||
{
|
||||
Ok(FixedSizeDescriptorSetBuilderArray {
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampler(sampler)?
|
||||
})
|
||||
pool: self.pool,
|
||||
inner: self.inner.add_sampler(sampler)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -41,16 +41,16 @@ use descriptor::descriptor::DescriptorDesc;
|
||||
use image::ImageViewAccess;
|
||||
|
||||
pub use self::collection::DescriptorSetsCollection;
|
||||
pub use self::fixed_size_pool::FixedSizeDescriptorSetsPool;
|
||||
pub use self::fixed_size_pool::FixedSizeDescriptorSet;
|
||||
pub use self::fixed_size_pool::FixedSizeDescriptorSetBuilder;
|
||||
pub use self::fixed_size_pool::FixedSizeDescriptorSetBuilderArray;
|
||||
pub use self::fixed_size_pool::FixedSizeDescriptorSetsPool;
|
||||
pub use self::persistent::PersistentDescriptorSet;
|
||||
pub use self::persistent::PersistentDescriptorSetBuf;
|
||||
pub use self::persistent::PersistentDescriptorSetBufView;
|
||||
pub use self::persistent::PersistentDescriptorSetBuildError;
|
||||
pub use self::persistent::PersistentDescriptorSetBuilder;
|
||||
pub use self::persistent::PersistentDescriptorSetBuilderArray;
|
||||
pub use self::persistent::PersistentDescriptorSetBuf;
|
||||
pub use self::persistent::PersistentDescriptorSetBufView;
|
||||
pub use self::persistent::PersistentDescriptorSetError;
|
||||
pub use self::persistent::PersistentDescriptorSetImg;
|
||||
pub use self::persistent::PersistentDescriptorSetSampler;
|
||||
|
@ -11,6 +11,8 @@ use std::error;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use buffer::BufferViewRef;
|
||||
use descriptor::descriptor::DescriptorDesc;
|
||||
@ -19,22 +21,20 @@ use descriptor::descriptor::DescriptorImageDesc;
|
||||
use descriptor::descriptor::DescriptorImageDescArray;
|
||||
use descriptor::descriptor::DescriptorImageDescDimensions;
|
||||
use descriptor::descriptor::DescriptorType;
|
||||
use descriptor::descriptor_set::DescriptorSet;
|
||||
use descriptor::descriptor_set::DescriptorSetDesc;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
|
||||
use descriptor::descriptor_set::DescriptorPool;
|
||||
use descriptor::descriptor_set::DescriptorPoolAlloc;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSet;
|
||||
use descriptor::descriptor_set::DescriptorSet;
|
||||
use descriptor::descriptor_set::DescriptorSetDesc;
|
||||
use descriptor::descriptor_set::DescriptorWrite;
|
||||
use descriptor::descriptor_set::StdDescriptorPoolAlloc;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSet;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
|
||||
use descriptor::pipeline_layout::PipelineLayoutAbstract;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use format::Format;
|
||||
use image::ImageViewAccess;
|
||||
use sampler::Sampler;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
|
||||
/// An immutable descriptor set that is expected to be long-lived.
|
||||
///
|
||||
@ -59,7 +59,7 @@ pub struct PersistentDescriptorSet<L, R, P = StdDescriptorPoolAlloc> {
|
||||
resources: R,
|
||||
pipeline_layout: L,
|
||||
set_id: usize,
|
||||
layout: Arc<UnsafeDescriptorSetLayout>
|
||||
layout: Arc<UnsafeDescriptorSetLayout>,
|
||||
}
|
||||
|
||||
impl<L> PersistentDescriptorSet<L, ()> {
|
||||
@ -122,7 +122,9 @@ unsafe impl<L, R, P> DescriptorSetDesc for PersistentDescriptorSet<L, R, P>
|
||||
{
|
||||
#[inline]
|
||||
fn num_bindings(&self) -> usize {
|
||||
self.pipeline_layout.num_bindings_in_set(self.set_id).unwrap()
|
||||
self.pipeline_layout
|
||||
.num_bindings_in_set(self.set_id)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@ -167,7 +169,9 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
{
|
||||
/// Builds a `PersistentDescriptorSet` from the builder.
|
||||
#[inline]
|
||||
pub fn build(self) -> Result<PersistentDescriptorSet<L, R, StdDescriptorPoolAlloc>, PersistentDescriptorSetBuildError> {
|
||||
pub fn build(self)
|
||||
-> Result<PersistentDescriptorSet<L, R, StdDescriptorPoolAlloc>,
|
||||
PersistentDescriptorSetBuildError> {
|
||||
let mut pool = Device::standard_descriptor_pool(self.layout.device());
|
||||
self.build_with_pool(&mut pool)
|
||||
}
|
||||
@ -178,8 +182,9 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
///
|
||||
/// Panics if the pool doesn't have the same device as the pipeline layout.
|
||||
///
|
||||
pub fn build_with_pool<P>(self, pool: &mut P)
|
||||
-> Result<PersistentDescriptorSet<L, R, P::Alloc>, PersistentDescriptorSetBuildError>
|
||||
pub fn build_with_pool<P>(
|
||||
self, pool: &mut P)
|
||||
-> Result<PersistentDescriptorSet<L, R, P::Alloc>, PersistentDescriptorSetBuildError>
|
||||
where P: ?Sized + DescriptorPool
|
||||
{
|
||||
assert_eq!(self.layout.device().internal_object(),
|
||||
@ -189,30 +194,32 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
|
||||
if expected_desc > self.binding_id {
|
||||
return Err(PersistentDescriptorSetBuildError::MissingDescriptors {
|
||||
expected: expected_desc as u32,
|
||||
obtained: self.binding_id as u32,
|
||||
});
|
||||
expected: expected_desc as u32,
|
||||
obtained: self.binding_id as u32,
|
||||
});
|
||||
}
|
||||
|
||||
debug_assert_eq!(expected_desc, self.binding_id);
|
||||
|
||||
let set_layout = self.layout.descriptor_set_layout(self.set_id)
|
||||
let set_layout = self.layout
|
||||
.descriptor_set_layout(self.set_id)
|
||||
.expect("Unable to get the descriptor set layout")
|
||||
.clone();
|
||||
|
||||
let set = unsafe {
|
||||
let mut set = pool.alloc(&set_layout)?;
|
||||
set.inner_mut().write(pool.device(), self.writes.into_iter());
|
||||
set.inner_mut()
|
||||
.write(pool.device(), self.writes.into_iter());
|
||||
set
|
||||
};
|
||||
|
||||
Ok(PersistentDescriptorSet {
|
||||
inner: set,
|
||||
resources: self.resources,
|
||||
pipeline_layout: self.layout,
|
||||
set_id: self.set_id,
|
||||
layout: set_layout,
|
||||
})
|
||||
inner: set,
|
||||
resources: self.resources,
|
||||
pipeline_layout: self.layout,
|
||||
set_id: self.set_id,
|
||||
layout: set_layout,
|
||||
})
|
||||
}
|
||||
|
||||
/// Call this function if the next element of the set is an array in order to set the value of
|
||||
@ -223,27 +230,31 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
/// This function can be called even if the descriptor isn't an array, and it is valid to enter
|
||||
/// the "array", add one element, then leave.
|
||||
#[inline]
|
||||
pub fn enter_array(self) -> Result<PersistentDescriptorSetBuilderArray<L, R>, PersistentDescriptorSetError> {
|
||||
pub fn enter_array(
|
||||
self)
|
||||
-> Result<PersistentDescriptorSetBuilderArray<L, R>, PersistentDescriptorSetError> {
|
||||
let desc = match self.layout.descriptor(self.set_id, self.binding_id) {
|
||||
Some(d) => d,
|
||||
None => return Err(PersistentDescriptorSetError::EmptyExpected),
|
||||
};
|
||||
|
||||
Ok(PersistentDescriptorSetBuilderArray {
|
||||
builder: self,
|
||||
desc,
|
||||
array_element: 0,
|
||||
})
|
||||
builder: self,
|
||||
desc,
|
||||
array_element: 0,
|
||||
})
|
||||
}
|
||||
|
||||
/// Skips the current descriptor if it is empty.
|
||||
#[inline]
|
||||
pub fn add_empty(mut self) -> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
|
||||
pub fn add_empty(
|
||||
mut self)
|
||||
-> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
|
||||
match self.layout.descriptor(self.set_id, self.binding_id) {
|
||||
None => (),
|
||||
Some(desc) => return Err(PersistentDescriptorSetError::WrongDescriptorTy {
|
||||
expected: desc.ty.ty().unwrap()
|
||||
}),
|
||||
expected: desc.ty.ty().unwrap(),
|
||||
}),
|
||||
}
|
||||
|
||||
self.binding_id += 1;
|
||||
@ -260,12 +271,13 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
///
|
||||
#[inline]
|
||||
pub fn add_buffer<T>(self, buffer: T)
|
||||
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetBuf<T>)>, PersistentDescriptorSetError>
|
||||
-> Result<PersistentDescriptorSetBuilder<L,
|
||||
(R,
|
||||
PersistentDescriptorSetBuf<T>)>,
|
||||
PersistentDescriptorSetError>
|
||||
where T: BufferAccess
|
||||
{
|
||||
self.enter_array()?
|
||||
.add_buffer(buffer)?
|
||||
.leave_array()
|
||||
self.enter_array()?.add_buffer(buffer)?.leave_array()
|
||||
}
|
||||
|
||||
/// Binds a buffer view as the next descriptor.
|
||||
@ -280,9 +292,7 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetBufView<T>)>, PersistentDescriptorSetError>
|
||||
where T: BufferViewRef
|
||||
{
|
||||
self.enter_array()?
|
||||
.add_buffer_view(view)?
|
||||
.leave_array()
|
||||
self.enter_array()?.add_buffer_view(view)?.leave_array()
|
||||
}
|
||||
|
||||
/// Binds an image view as the next descriptor.
|
||||
@ -295,12 +305,13 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
///
|
||||
#[inline]
|
||||
pub fn add_image<T>(self, image_view: T)
|
||||
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetImg<T>)>, PersistentDescriptorSetError>
|
||||
-> Result<PersistentDescriptorSetBuilder<L,
|
||||
(R,
|
||||
PersistentDescriptorSetImg<T>)>,
|
||||
PersistentDescriptorSetError>
|
||||
where T: ImageViewAccess
|
||||
{
|
||||
self.enter_array()?
|
||||
.add_image(image_view)?
|
||||
.leave_array()
|
||||
self.enter_array()?.add_image(image_view)?.leave_array()
|
||||
}
|
||||
|
||||
/// Binds an image view with a sampler as the next descriptor.
|
||||
@ -331,11 +342,11 @@ impl<L, R> PersistentDescriptorSetBuilder<L, R>
|
||||
///
|
||||
#[inline]
|
||||
pub fn add_sampler(self, sampler: Arc<Sampler>)
|
||||
-> Result<PersistentDescriptorSetBuilder<L, (R, PersistentDescriptorSetSampler)>, PersistentDescriptorSetError>
|
||||
{
|
||||
self.enter_array()?
|
||||
.add_sampler(sampler)?
|
||||
.leave_array()
|
||||
-> Result<PersistentDescriptorSetBuilder<L,
|
||||
(R,
|
||||
PersistentDescriptorSetSampler)>,
|
||||
PersistentDescriptorSetError> {
|
||||
self.enter_array()?.add_sampler(sampler)?.leave_array()
|
||||
}
|
||||
}
|
||||
|
||||
@ -349,14 +360,18 @@ pub struct PersistentDescriptorSetBuilderArray<L, R> {
|
||||
desc: DescriptorDesc,
|
||||
}
|
||||
|
||||
impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbstract {
|
||||
impl<L, R> PersistentDescriptorSetBuilderArray<L, R>
|
||||
where L: PipelineLayoutAbstract
|
||||
{
|
||||
/// Leaves the array. Call this once you added all the elements of the array.
|
||||
pub fn leave_array(mut self) -> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
|
||||
pub fn leave_array(
|
||||
mut self)
|
||||
-> Result<PersistentDescriptorSetBuilder<L, R>, PersistentDescriptorSetError> {
|
||||
if self.desc.array_count > self.array_element as u32 {
|
||||
return Err(PersistentDescriptorSetError::MissingArrayElements {
|
||||
expected: self.desc.array_count,
|
||||
obtained: self.array_element as u32,
|
||||
});
|
||||
expected: self.desc.array_count,
|
||||
obtained: self.array_element as u32,
|
||||
});
|
||||
}
|
||||
|
||||
debug_assert_eq!(self.desc.array_count, self.array_element as u32);
|
||||
@ -395,7 +410,11 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
// TODO: eventually shouldn't be an assert ; for now robust_buffer_access is always
|
||||
// enabled so this assert should never fail in practice, but we put it anyway
|
||||
// in case we forget to adjust this code
|
||||
assert!(self.builder.layout.device().enabled_features().robust_buffer_access);
|
||||
assert!(self.builder
|
||||
.layout
|
||||
.device()
|
||||
.enabled_features()
|
||||
.robust_buffer_access);
|
||||
|
||||
if buffer_desc.storage {
|
||||
if !buffer.inner().buffer.usage_storage_buffer() {
|
||||
@ -404,7 +423,8 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
|
||||
unsafe {
|
||||
DescriptorWrite::storage_buffer(self.builder.binding_id as u32,
|
||||
self.array_element as u32, &buffer)
|
||||
self.array_element as u32,
|
||||
&buffer)
|
||||
}
|
||||
} else {
|
||||
if !buffer.inner().buffer.usage_uniform_buffer() {
|
||||
@ -413,31 +433,33 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
|
||||
unsafe {
|
||||
DescriptorWrite::uniform_buffer(self.builder.binding_id as u32,
|
||||
self.array_element as u32, &buffer)
|
||||
self.array_element as u32,
|
||||
&buffer)
|
||||
}
|
||||
}
|
||||
},
|
||||
ref d => {
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
|
||||
expected: d.ty().unwrap()
|
||||
});
|
||||
expected: d.ty().unwrap(),
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
Ok(PersistentDescriptorSetBuilderArray {
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources, PersistentDescriptorSetBuf {
|
||||
buffer: buffer,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
})
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources,
|
||||
PersistentDescriptorSetBuf {
|
||||
buffer: buffer,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
}),
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds a buffer view as the next element in the array.
|
||||
@ -483,25 +505,26 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
},
|
||||
ref d => {
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
|
||||
expected: d.ty().unwrap()
|
||||
});
|
||||
expected: d.ty().unwrap(),
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
Ok(PersistentDescriptorSetBuilderArray {
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources, PersistentDescriptorSetBufView {
|
||||
view: view,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
})
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources,
|
||||
PersistentDescriptorSetBufView {
|
||||
view: view,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
}),
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds an image view as the next element in the array.
|
||||
@ -523,7 +546,9 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
return Err(PersistentDescriptorSetError::ArrayOutOfBounds);
|
||||
}
|
||||
|
||||
let desc = match self.builder.layout.descriptor(self.builder.set_id, self.builder.binding_id) {
|
||||
let desc = match self.builder
|
||||
.layout
|
||||
.descriptor(self.builder.set_id, self.builder.binding_id) {
|
||||
Some(d) => d,
|
||||
None => return Err(PersistentDescriptorSetError::EmptyExpected),
|
||||
};
|
||||
@ -533,12 +558,19 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
image_match_desc(&image_view, &desc)?;
|
||||
|
||||
if desc.sampled {
|
||||
DescriptorWrite::sampled_image(self.builder.binding_id as u32, self.array_element as u32, &image_view)
|
||||
DescriptorWrite::sampled_image(self.builder.binding_id as u32,
|
||||
self.array_element as u32,
|
||||
&image_view)
|
||||
} else {
|
||||
DescriptorWrite::storage_image(self.builder.binding_id as u32, self.array_element as u32, &image_view)
|
||||
DescriptorWrite::storage_image(self.builder.binding_id as u32,
|
||||
self.array_element as u32,
|
||||
&image_view)
|
||||
}
|
||||
},
|
||||
DescriptorDescTy::InputAttachment { multisampled, array_layers } => {
|
||||
DescriptorDescTy::InputAttachment {
|
||||
multisampled,
|
||||
array_layers,
|
||||
} => {
|
||||
if !image_view.parent().inner().image.usage_input_attachment() {
|
||||
return Err(PersistentDescriptorSetError::MissingUsage);
|
||||
}
|
||||
@ -555,43 +587,49 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
DescriptorImageDescArray::NonArrayed => {
|
||||
if image_layers != 1 {
|
||||
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
|
||||
expected: 1,
|
||||
obtained: image_layers,
|
||||
})
|
||||
expected: 1,
|
||||
obtained: image_layers,
|
||||
});
|
||||
}
|
||||
},
|
||||
DescriptorImageDescArray::Arrayed { max_layers: Some(max_layers) } => {
|
||||
if image_layers > max_layers { // TODO: is this correct? "max" layers? or is it in fact min layers?
|
||||
if image_layers > max_layers {
|
||||
// TODO: is this correct? "max" layers? or is it in fact min layers?
|
||||
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
|
||||
expected: max_layers,
|
||||
obtained: image_layers,
|
||||
})
|
||||
expected: max_layers,
|
||||
obtained: image_layers,
|
||||
});
|
||||
}
|
||||
},
|
||||
DescriptorImageDescArray::Arrayed { max_layers: None } => {},
|
||||
};
|
||||
|
||||
DescriptorWrite::input_attachment(self.builder.binding_id as u32, self.array_element as u32, &image_view)
|
||||
DescriptorWrite::input_attachment(self.builder.binding_id as u32,
|
||||
self.array_element as u32,
|
||||
&image_view)
|
||||
},
|
||||
ty => {
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy { expected: ty.ty().unwrap() });
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
|
||||
expected: ty.ty().unwrap(),
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
Ok(PersistentDescriptorSetBuilderArray {
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources, PersistentDescriptorSetImg {
|
||||
image: image_view,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
})
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources,
|
||||
PersistentDescriptorSetImg {
|
||||
image: image_view,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
}),
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
}
|
||||
|
||||
/// Binds an image view with a sampler as the next element in the array.
|
||||
@ -615,7 +653,9 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
return Err(PersistentDescriptorSetError::ArrayOutOfBounds);
|
||||
}
|
||||
|
||||
let desc = match self.builder.layout.descriptor(self.builder.set_id, self.builder.binding_id) {
|
||||
let desc = match self.builder
|
||||
.layout
|
||||
.descriptor(self.builder.set_id, self.builder.binding_id) {
|
||||
Some(d) => d,
|
||||
None => return Err(PersistentDescriptorSetError::EmptyExpected),
|
||||
};
|
||||
@ -627,29 +667,34 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
self.builder.writes.push(match desc.ty {
|
||||
DescriptorDescTy::CombinedImageSampler(ref desc) => {
|
||||
image_match_desc(&image_view, &desc)?;
|
||||
DescriptorWrite::combined_image_sampler(self.builder.binding_id as u32, self.array_element as u32, &sampler, &image_view)
|
||||
DescriptorWrite::combined_image_sampler(self.builder.binding_id as u32,
|
||||
self.array_element as u32,
|
||||
&sampler,
|
||||
&image_view)
|
||||
},
|
||||
ty => {
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy { expected: ty.ty().unwrap() });
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
|
||||
expected: ty.ty().unwrap(),
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
Ok(PersistentDescriptorSetBuilderArray {
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: ((self.builder.resources, PersistentDescriptorSetImg {
|
||||
image: image_view,
|
||||
descriptor_num: self.builder.binding_id as u32,
|
||||
}), PersistentDescriptorSetSampler {
|
||||
sampler: sampler,
|
||||
}),
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: ((self.builder.resources,
PersistentDescriptorSetImg {
image: image_view,
descriptor_num: self.builder.binding_id as u32,
}),
PersistentDescriptorSetSampler { sampler: sampler }),
},
desc: self.desc,
array_element: self.array_element + 1,
})
}
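Not part of the diff — a toy illustration. Each successful bind returns a builder whose `resources` field wraps the previous resources in additional tuple layers (here `((R, PersistentDescriptorSetImg<I>), PersistentDescriptorSetSampler)`). The snippet below shows only that nesting pattern with placeholder types; the real builder types are not reproduced.

```rust
// Placeholder stand-ins for PersistentDescriptorSetImg<I> and PersistentDescriptorSetSampler.
struct Img;
struct Sampler;

// Binding a combined image sampler wraps the existing resources `R` in two more
// tuple layers, just like `resources: ((R, Img), Sampler)` above.
fn bind_combined<R>(resources: R) -> ((R, Img), Sampler) {
    ((resources, Img), Sampler)
}

fn main() {
    let first = bind_combined(());      // (((), Img), Sampler)
    let _second = bind_combined(first); // two more layers of nesting
}
```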
/// Binds a sampler as the next element in the array.
|
||||
@ -670,33 +715,38 @@ impl<L, R> PersistentDescriptorSetBuilderArray<L, R> where L: PipelineLayoutAbst
|
||||
return Err(PersistentDescriptorSetError::ArrayOutOfBounds);
|
||||
}
|
||||
|
||||
let desc = match self.builder.layout.descriptor(self.builder.set_id, self.builder.binding_id) {
|
||||
let desc = match self.builder
|
||||
.layout
|
||||
.descriptor(self.builder.set_id, self.builder.binding_id) {
|
||||
Some(d) => d,
|
||||
None => return Err(PersistentDescriptorSetError::EmptyExpected),
|
||||
};
|
||||
|
||||
self.builder.writes.push(match desc.ty {
|
||||
DescriptorDescTy::Sampler => {
|
||||
DescriptorWrite::sampler(self.builder.binding_id as u32, self.array_element as u32, &sampler)
|
||||
DescriptorWrite::sampler(self.builder.binding_id as u32,
|
||||
self.array_element as u32,
|
||||
&sampler)
|
||||
},
|
||||
ty => {
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy { expected: ty.ty().unwrap() });
|
||||
return Err(PersistentDescriptorSetError::WrongDescriptorTy {
|
||||
expected: ty.ty().unwrap(),
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
Ok(PersistentDescriptorSetBuilderArray {
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources, PersistentDescriptorSetSampler {
|
||||
sampler: sampler,
|
||||
}),
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
builder: PersistentDescriptorSetBuilder {
|
||||
layout: self.builder.layout,
|
||||
set_id: self.builder.set_id,
|
||||
binding_id: self.builder.binding_id,
|
||||
writes: self.builder.writes,
|
||||
resources: (self.builder.resources,
|
||||
PersistentDescriptorSetSampler { sampler: sampler }),
|
||||
},
|
||||
desc: self.desc,
|
||||
array_element: self.array_element + 1,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -707,24 +757,24 @@ fn image_match_desc<I>(image_view: &I, desc: &DescriptorImageDesc)
|
||||
{
|
||||
if desc.sampled && !image_view.parent().inner().image.usage_sampled() {
|
||||
return Err(PersistentDescriptorSetError::MissingUsage);
|
||||
} else if !desc.sampled && !image_view.parent().inner().image.usage_storage() {
|
||||
} else if !desc.sampled && !image_view.parent().inner().image.usage_storage() {
|
||||
return Err(PersistentDescriptorSetError::MissingUsage);
|
||||
}
|
||||
|
||||
let image_view_ty = DescriptorImageDescDimensions::from_dimensions(image_view.dimensions());
if image_view_ty != desc.dimensions {
return Err(PersistentDescriptorSetError::ImageViewTypeMismatch {
expected: desc.dimensions,
obtained: image_view_ty,
});
expected: desc.dimensions,
obtained: image_view_ty,
});
}

if let Some(format) = desc.format {
if image_view.format() != format {
return Err(PersistentDescriptorSetError::ImageViewFormatMismatch {
expected: format,
obtained: image_view.format(),
});
expected: format,
obtained: image_view.format(),
});
}
}
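Not part of the diff — a condensed restatement of the two checks just shown (view type, then optional format), using simplified stand-in enums instead of vulkano's `DescriptorImageDescDimensions`, `Format` and `PersistentDescriptorSetError`; the real `image_match_desc` also validates usage flags and array layers.

```rust
// Simplified stand-ins; only the control flow follows the function above.
#[derive(Debug, PartialEq, Clone, Copy)]
enum Dims { OneD, TwoD }

#[derive(Debug, PartialEq, Clone, Copy)]
enum Fmt { R8G8B8A8Unorm, D16Unorm }

#[derive(Debug)]
enum MatchError {
    ImageViewTypeMismatch { expected: Dims, obtained: Dims },
    ImageViewFormatMismatch { expected: Fmt, obtained: Fmt },
}

fn image_match_desc(view_dims: Dims, view_fmt: Fmt,
                    desc_dims: Dims, desc_fmt: Option<Fmt>)
                    -> Result<(), MatchError> {
    if view_dims != desc_dims {
        return Err(MatchError::ImageViewTypeMismatch {
            expected: desc_dims,
            obtained: view_dims,
        });
    }
    // The format is only enforced when the descriptor pins one down.
    if let Some(expected) = desc_fmt {
        if view_fmt != expected {
            return Err(MatchError::ImageViewFormatMismatch {
                expected: expected,
                obtained: view_fmt,
            });
        }
    }
    Ok(())
}

fn main() {
    assert!(image_match_desc(Dims::TwoD, Fmt::R8G8B8A8Unorm, Dims::TwoD, None).is_ok());
    // Wrong view type:
    println!("{:?}", image_match_desc(Dims::OneD, Fmt::R8G8B8A8Unorm, Dims::TwoD, None));
    // Wrong format:
    println!("{:?}", image_match_desc(Dims::TwoD, Fmt::D16Unorm, Dims::TwoD, Some(Fmt::R8G8B8A8Unorm)));
}
```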
@ -742,17 +792,18 @@ fn image_match_desc<I>(image_view: &I, desc: &DescriptorImageDesc)
|
||||
// array with one layer? need to check
|
||||
if image_layers != 1 {
|
||||
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
|
||||
expected: 1,
|
||||
obtained: image_layers,
|
||||
})
|
||||
expected: 1,
|
||||
obtained: image_layers,
|
||||
});
|
||||
}
|
||||
},
|
||||
DescriptorImageDescArray::Arrayed { max_layers: Some(max_layers) } => {
|
||||
if image_layers > max_layers { // TODO: is this correct? "max" layers? or is it in fact min layers?
|
||||
if image_layers > max_layers {
|
||||
// TODO: is this correct? "max" layers? or is it in fact min layers?
|
||||
return Err(PersistentDescriptorSetError::ArrayLayersMismatch {
|
||||
expected: max_layers,
|
||||
obtained: image_layers,
|
||||
})
|
||||
expected: max_layers,
|
||||
obtained: image_layers,
|
||||
});
|
||||
}
|
||||
},
|
||||
DescriptorImageDescArray::Arrayed { max_layers: None } => {},
|
||||
@ -798,7 +849,7 @@ pub struct PersistentDescriptorSetBuf<B> {
|
||||
|
||||
unsafe impl<R, B> PersistentDescriptorSetResources for (R, PersistentDescriptorSetBuf<B>)
|
||||
where R: PersistentDescriptorSetResources,
|
||||
B: BufferAccess,
|
||||
B: BufferAccess
|
||||
{
|
||||
#[inline]
|
||||
fn num_buffers(&self) -> usize {
|
||||
@ -837,7 +888,7 @@ pub struct PersistentDescriptorSetBufView<V>
|
||||
|
||||
unsafe impl<R, V> PersistentDescriptorSetResources for (R, PersistentDescriptorSetBufView<V>)
|
||||
where R: PersistentDescriptorSetResources,
|
||||
V: BufferViewRef,
|
||||
V: BufferViewRef
|
||||
{
|
||||
#[inline]
|
||||
fn num_buffers(&self) -> usize {
|
||||
@ -874,7 +925,7 @@ pub struct PersistentDescriptorSetImg<I> {
|
||||
|
||||
unsafe impl<R, I> PersistentDescriptorSetResources for (R, PersistentDescriptorSetImg<I>)
|
||||
where R: PersistentDescriptorSetResources,
|
||||
I: ImageViewAccess,
|
||||
I: ImageViewAccess
|
||||
{
|
||||
#[inline]
|
||||
fn num_buffers(&self) -> usize {
|
||||
|
@ -180,14 +180,14 @@ impl Drop for StdDescriptorPoolAlloc {
|
||||
|
||||
#[cfg(test)]
mod tests {
use std::iter;
use std::sync::Arc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::StdDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use std::iter;
use std::sync::Arc;

#[test]
|
||||
fn desc_pool_kept_alive() {
|
||||
@ -200,7 +200,8 @@ mod tests {
|
||||
stages: ShaderStages::all(),
|
||||
readonly: false,
|
||||
};
|
||||
let layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(desc))).unwrap();
|
||||
let layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(desc)))
|
||||
.unwrap();
|
||||
|
||||
let mut pool = Arc::new(StdDescriptorPool::new(device));
|
||||
let pool_weak = Arc::downgrade(&pool);
|
||||
|
@ -829,8 +829,7 @@ impl DescriptorWrite {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
|
||||
view: &BufferView<F, B>)
|
||||
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &BufferView<F, B>)
|
||||
-> DescriptorWrite
|
||||
where B: BufferAccess
|
||||
{
|
||||
@ -844,8 +843,7 @@ impl DescriptorWrite {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
|
||||
view: &BufferView<F, B>)
|
||||
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &BufferView<F, B>)
|
||||
-> DescriptorWrite
|
||||
where B: BufferAccess
|
||||
{
|
||||
@ -865,10 +863,20 @@ impl DescriptorWrite {
|
||||
let size = buffer.size();
|
||||
let BufferInner { buffer, offset } = buffer.inner();
|
||||
|
||||
debug_assert_eq!(offset % buffer.device().physical_device().limits()
|
||||
.min_uniform_buffer_offset_alignment() as usize, 0);
|
||||
debug_assert!(size <= buffer.device().physical_device().limits()
|
||||
.max_uniform_buffer_range() as usize);
|
||||
debug_assert_eq!(offset %
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.min_uniform_buffer_offset_alignment() as
|
||||
usize,
|
||||
0);
|
||||
debug_assert!(size <=
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_uniform_buffer_range() as usize);
|
||||
|
||||
DescriptorWrite {
|
||||
binding: binding,
|
||||
@ -888,10 +896,20 @@ impl DescriptorWrite {
|
||||
let size = buffer.size();
|
||||
let BufferInner { buffer, offset } = buffer.inner();
|
||||
|
||||
debug_assert_eq!(offset % buffer.device().physical_device().limits()
|
||||
.min_storage_buffer_offset_alignment() as usize, 0);
|
||||
debug_assert!(size <= buffer.device().physical_device().limits()
|
||||
.max_storage_buffer_range() as usize);
|
||||
debug_assert_eq!(offset %
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.min_storage_buffer_offset_alignment() as
|
||||
usize,
|
||||
0);
|
||||
debug_assert!(size <=
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_storage_buffer_range() as usize);
|
||||
|
||||
DescriptorWrite {
|
||||
binding: binding,
|
||||
@ -912,10 +930,20 @@ impl DescriptorWrite {
|
||||
let size = buffer.size();
|
||||
let BufferInner { buffer, offset } = buffer.inner();
|
||||
|
||||
debug_assert_eq!(offset % buffer.device().physical_device().limits()
|
||||
.min_uniform_buffer_offset_alignment() as usize, 0);
|
||||
debug_assert!(size <= buffer.device().physical_device().limits()
|
||||
.max_uniform_buffer_range() as usize);
|
||||
debug_assert_eq!(offset %
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.min_uniform_buffer_offset_alignment() as
|
||||
usize,
|
||||
0);
|
||||
debug_assert!(size <=
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_uniform_buffer_range() as usize);
|
||||
|
||||
DescriptorWrite {
|
||||
binding: binding,
|
||||
@ -934,10 +962,20 @@ impl DescriptorWrite {
|
||||
let size = buffer.size();
|
||||
let BufferInner { buffer, offset } = buffer.inner();
|
||||
|
||||
debug_assert_eq!(offset % buffer.device().physical_device().limits()
|
||||
.min_storage_buffer_offset_alignment() as usize, 0);
|
||||
debug_assert!(size <= buffer.device().physical_device().limits()
|
||||
.max_storage_buffer_range() as usize);
|
||||
debug_assert_eq!(offset %
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.min_storage_buffer_offset_alignment() as
|
||||
usize,
|
||||
0);
|
||||
debug_assert!(size <=
|
||||
buffer
|
||||
.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_storage_buffer_range() as usize);
|
||||
|
||||
DescriptorWrite {
|
||||
binding: binding,
|
||||
@ -1084,13 +1122,14 @@ mod tests {
|
||||
|
||||
assert_should_panic!("Tried to allocate from a pool with a set layout \
|
||||
of a different device",
|
||||
{
|
||||
let mut pool = UnsafeDescriptorPool::new(device2, &desc, 10, false).unwrap();
|
||||
{
|
||||
let mut pool =
|
||||
UnsafeDescriptorPool::new(device2, &desc, 10, false).unwrap();
|
||||
|
||||
unsafe {
|
||||
let _ = pool.alloc(iter::once(&set_layout));
|
||||
}
|
||||
});
|
||||
unsafe {
|
||||
let _ = pool.alloc(iter::once(&set_layout));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -55,10 +55,12 @@ pub fn check_desc_against_limits<D>(desc: &D, limits: Limits)
|
||||
num_samplers.increment(descriptor.array_count, &descriptor.stages);
|
||||
num_sampled_images.increment(descriptor.array_count, &descriptor.stages);
|
||||
},
|
||||
DescriptorType::SampledImage | DescriptorType::UniformTexelBuffer => {
|
||||
DescriptorType::SampledImage |
|
||||
DescriptorType::UniformTexelBuffer => {
|
||||
num_sampled_images.increment(descriptor.array_count, &descriptor.stages);
|
||||
},
|
||||
DescriptorType::StorageImage | DescriptorType::StorageTexelBuffer => {
|
||||
DescriptorType::StorageImage |
|
||||
DescriptorType::StorageTexelBuffer => {
|
||||
num_storage_images.increment(descriptor.array_count, &descriptor.stages);
|
||||
},
|
||||
DescriptorType::UniformBuffer => {
|
||||
@ -84,110 +86,106 @@ pub fn check_desc_against_limits<D>(desc: &D, limits: Limits)
|
||||
|
||||
if desc.num_sets() > limits.max_bound_descriptor_sets() as usize {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetsLimitExceeded {
|
||||
limit: limits.max_bound_descriptor_sets() as usize,
|
||||
requested: desc.num_sets(),
|
||||
});
|
||||
limit: limits.max_bound_descriptor_sets() as usize,
|
||||
requested: desc.num_sets(),
|
||||
});
|
||||
}
|
||||
|
||||
if num_resources.max_per_stage() > limits.max_per_stage_resources() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageResourcesLimitExceeded {
|
||||
limit: limits.max_per_stage_resources(),
|
||||
requested: num_resources.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_resources(),
|
||||
requested: num_resources.max_per_stage(),
|
||||
});
|
||||
}
|
||||
|
||||
if num_samplers.max_per_stage() > limits.max_per_stage_descriptor_samplers() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageDescriptorSamplersLimitExceeded {
|
||||
limit: limits.max_per_stage_descriptor_samplers(),
|
||||
requested: num_samplers.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_descriptor_samplers(),
|
||||
requested: num_samplers.max_per_stage(),
|
||||
});
|
||||
}
|
||||
if num_uniform_buffers.max_per_stage() > limits.max_per_stage_descriptor_uniform_buffers() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageDescriptorUniformBuffersLimitExceeded {
|
||||
limit: limits.max_per_stage_descriptor_uniform_buffers(),
|
||||
requested: num_uniform_buffers.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_descriptor_uniform_buffers(),
|
||||
requested: num_uniform_buffers.max_per_stage(),
|
||||
});
|
||||
}
|
||||
if num_storage_buffers.max_per_stage() > limits.max_per_stage_descriptor_storage_buffers() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageDescriptorStorageBuffersLimitExceeded {
|
||||
limit: limits.max_per_stage_descriptor_storage_buffers(),
|
||||
requested: num_storage_buffers.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_descriptor_storage_buffers(),
|
||||
requested: num_storage_buffers.max_per_stage(),
|
||||
});
|
||||
}
|
||||
if num_sampled_images.max_per_stage() > limits.max_per_stage_descriptor_sampled_images() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageDescriptorSampledImagesLimitExceeded {
|
||||
limit: limits.max_per_stage_descriptor_sampled_images(),
|
||||
requested: num_sampled_images.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_descriptor_sampled_images(),
|
||||
requested: num_sampled_images.max_per_stage(),
|
||||
});
|
||||
}
|
||||
if num_storage_images.max_per_stage() > limits.max_per_stage_descriptor_storage_images() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageDescriptorStorageImagesLimitExceeded {
|
||||
limit: limits.max_per_stage_descriptor_storage_images(),
|
||||
requested: num_storage_images.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_descriptor_storage_images(),
|
||||
requested: num_storage_images.max_per_stage(),
|
||||
});
|
||||
}
|
||||
if num_input_attachments.max_per_stage() > limits.max_per_stage_descriptor_input_attachments() {
|
||||
return Err(PipelineLayoutLimitsError::MaxPerStageDescriptorInputAttachmentsLimitExceeded {
|
||||
limit: limits.max_per_stage_descriptor_input_attachments(),
|
||||
requested: num_input_attachments.max_per_stage(),
|
||||
});
|
||||
limit: limits.max_per_stage_descriptor_input_attachments(),
|
||||
requested: num_input_attachments.max_per_stage(),
|
||||
});
|
||||
}
|
||||
|
||||
if num_samplers.total > limits.max_descriptor_set_samplers() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetSamplersLimitExceeded {
|
||||
limit: limits.max_descriptor_set_samplers(),
|
||||
requested: num_samplers.total,
|
||||
});
|
||||
limit: limits.max_descriptor_set_samplers(),
|
||||
requested: num_samplers.total,
|
||||
});
|
||||
}
|
||||
if num_uniform_buffers.total > limits.max_descriptor_set_uniform_buffers() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersLimitExceeded {
|
||||
limit: limits.max_descriptor_set_uniform_buffers(),
|
||||
requested: num_uniform_buffers.total,
|
||||
});
|
||||
limit: limits.max_descriptor_set_uniform_buffers(),
|
||||
requested: num_uniform_buffers.total,
|
||||
});
|
||||
}
|
||||
if num_uniform_buffers_dynamic > limits.max_descriptor_set_uniform_buffers_dynamic() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersDynamicLimitExceeded {
|
||||
limit: limits.max_descriptor_set_uniform_buffers_dynamic(),
|
||||
requested: num_uniform_buffers_dynamic,
|
||||
});
|
||||
limit: limits.max_descriptor_set_uniform_buffers_dynamic(),
|
||||
requested: num_uniform_buffers_dynamic,
|
||||
});
|
||||
}
|
||||
if num_storage_buffers.total > limits.max_descriptor_set_storage_buffers() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersLimitExceeded {
|
||||
limit: limits.max_descriptor_set_storage_buffers(),
|
||||
requested: num_storage_buffers.total,
|
||||
});
|
||||
limit: limits.max_descriptor_set_storage_buffers(),
|
||||
requested: num_storage_buffers.total,
|
||||
});
|
||||
}
|
||||
if num_storage_buffers_dynamic > limits.max_descriptor_set_storage_buffers_dynamic() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersDynamicLimitExceeded {
|
||||
limit: limits.max_descriptor_set_storage_buffers_dynamic(),
|
||||
requested: num_storage_buffers_dynamic,
|
||||
});
|
||||
limit: limits.max_descriptor_set_storage_buffers_dynamic(),
|
||||
requested: num_storage_buffers_dynamic,
|
||||
});
|
||||
}
|
||||
if num_sampled_images.total > limits.max_descriptor_set_sampled_images() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetSampledImagesLimitExceeded {
|
||||
limit: limits.max_descriptor_set_sampled_images(),
|
||||
requested: num_sampled_images.total,
|
||||
});
|
||||
limit: limits.max_descriptor_set_sampled_images(),
|
||||
requested: num_sampled_images.total,
|
||||
});
|
||||
}
|
||||
if num_storage_images.total > limits.max_descriptor_set_storage_images() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetStorageImagesLimitExceeded {
|
||||
limit: limits.max_descriptor_set_storage_images(),
|
||||
requested: num_storage_images.total,
|
||||
});
|
||||
limit: limits.max_descriptor_set_storage_images(),
|
||||
requested: num_storage_images.total,
|
||||
});
|
||||
}
|
||||
if num_input_attachments.total > limits.max_descriptor_set_input_attachments() {
|
||||
return Err(PipelineLayoutLimitsError::MaxDescriptorSetInputAttachmentsLimitExceeded {
|
||||
limit: limits.max_descriptor_set_input_attachments(),
|
||||
requested: num_input_attachments.total,
|
||||
});
|
||||
limit: limits.max_descriptor_set_input_attachments(),
|
||||
requested: num_input_attachments.total,
|
||||
});
|
||||
}
|
||||
|
||||
for pc_id in 0 .. desc.num_push_constants_ranges() {
|
||||
let PipelineLayoutDescPcRange {
|
||||
offset,
|
||||
size,
|
||||
..
|
||||
} = {
|
||||
let PipelineLayoutDescPcRange { offset, size, .. } = {
|
||||
match desc.push_constants_range(pc_id) {
|
||||
Some(o) => o,
|
||||
None => continue,
|
||||
@ -196,9 +194,9 @@ pub fn check_desc_against_limits<D>(desc: &D, limits: Limits)
|
||||
|
||||
if offset + size > limits.max_push_constants_size() as usize {
|
||||
return Err(PipelineLayoutLimitsError::MaxPushConstantsSizeExceeded {
|
||||
limit: limits.max_push_constants_size() as usize,
|
||||
requested: offset + size,
|
||||
});
|
||||
limit: limits.max_push_constants_size() as usize,
|
||||
requested: offset + size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@ -361,19 +359,29 @@ impl error::Error for PipelineLayoutLimitsError {
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorSamplersLimitExceeded { .. } => {
|
||||
"the `max_per_stage_descriptor_samplers()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorUniformBuffersLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorUniformBuffersLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_per_stage_descriptor_uniform_buffers()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageBuffersLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageBuffersLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_per_stage_descriptor_storage_buffers()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorSampledImagesLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorSampledImagesLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_per_stage_descriptor_sampled_images()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageImagesLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorStorageImagesLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_per_stage_descriptor_storage_images()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorInputAttachmentsLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxPerStageDescriptorInputAttachmentsLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_per_stage_descriptor_input_attachments()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetSamplersLimitExceeded { .. } => {
|
||||
@ -382,13 +390,17 @@ impl error::Error for PipelineLayoutLimitsError {
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersLimitExceeded { .. } => {
|
||||
"the `max_descriptor_set_uniform_buffers()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersDynamicLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetUniformBuffersDynamicLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_descriptor_set_uniform_buffers_dynamic()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersLimitExceeded { .. } => {
|
||||
"the `max_descriptor_set_storage_buffers()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersDynamicLimitExceeded { .. } => {
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetStorageBuffersDynamicLimitExceeded {
|
||||
..
|
||||
} => {
|
||||
"the `max_descriptor_set_storage_buffers_dynamic()` limit has been exceeded"
|
||||
},
|
||||
PipelineLayoutLimitsError::MaxDescriptorSetSampledImagesLimitExceeded { .. } => {
|
||||
@ -426,22 +438,46 @@ struct Counter {
|
||||
impl Counter {
|
||||
fn increment(&mut self, num: u32, stages: &ShaderStages) {
|
||||
self.total += num;
|
||||
if stages.compute { self.compute += num; }
|
||||
if stages.vertex { self.vertex += num; }
|
||||
if stages.tessellation_control { self.tess_ctl += num; }
|
||||
if stages.tessellation_evaluation { self.tess_eval += num; }
|
||||
if stages.geometry { self.geometry += num; }
|
||||
if stages.fragment { self.frag += num; }
|
||||
if stages.compute {
|
||||
self.compute += num;
|
||||
}
|
||||
if stages.vertex {
|
||||
self.vertex += num;
|
||||
}
|
||||
if stages.tessellation_control {
|
||||
self.tess_ctl += num;
|
||||
}
|
||||
if stages.tessellation_evaluation {
|
||||
self.tess_eval += num;
|
||||
}
|
||||
if stages.geometry {
|
||||
self.geometry += num;
|
||||
}
|
||||
if stages.fragment {
|
||||
self.frag += num;
|
||||
}
|
||||
}
|
||||
|
||||
fn max_per_stage(&self) -> u32 {
let mut max = 0;
if self.compute > max { max = self.compute; }
if self.vertex > max { max = self.vertex; }
if self.geometry > max { max = self.geometry; }
if self.tess_ctl > max { max = self.tess_ctl; }
if self.tess_eval > max { max = self.tess_eval; }
if self.frag > max { max = self.frag; }
if self.compute > max {
max = self.compute;
}
if self.vertex > max {
max = self.vertex;
}
if self.geometry > max {
max = self.geometry;
}
if self.tess_ctl > max {
max = self.tess_ctl;
}
if self.tess_eval > max {
max = self.tess_eval;
}
if self.frag > max {
max = self.frag;
}
max
}
}
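Not part of the diff — an equivalent formulation. `max_per_stage` above is a manual running maximum over the six per-stage counters; the same result can be computed with an iterator, as in this self-contained sketch (the struct is repeated only so the snippet compiles on its own).

```rust
// Same fields as the Counter struct in this file; `total` is kept for fidelity.
#[derive(Default)]
struct Counter {
    total: u32,
    compute: u32,
    vertex: u32,
    geometry: u32,
    tess_ctl: u32,
    tess_eval: u32,
    frag: u32,
}

impl Counter {
    fn max_per_stage(&self) -> u32 {
        // The array is never empty, so unwrap_or(0) only keeps the types tidy.
        [self.compute, self.vertex, self.geometry,
         self.tess_ctl, self.tess_eval, self.frag]
            .iter()
            .cloned()
            .max()
            .unwrap_or(0)
    }
}

fn main() {
    let c = Counter { vertex: 3, frag: 5, ..Counter::default() };
    assert_eq!(c.max_per_stage(), 5);
    println!("total = {}", c.total);
}
```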
@ -17,11 +17,11 @@ use descriptor::descriptor::DescriptorDesc;
|
||||
use descriptor::descriptor::ShaderStages;
|
||||
use descriptor::descriptor_set::DescriptorSetsCollection;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
|
||||
use descriptor::pipeline_layout::limits_check;
|
||||
use descriptor::pipeline_layout::PipelineLayout;
|
||||
use descriptor::pipeline_layout::PipelineLayoutCreationError;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
|
||||
use descriptor::pipeline_layout::PipelineLayoutSys;
|
||||
use descriptor::pipeline_layout::limits_check;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
|
||||
@ -103,8 +103,7 @@ pub unsafe trait PipelineLayoutDesc {
/// Checks whether this description fulfills the device limits requirements.
#[inline]
fn check_against_limits(&self, device: &Device)
-> Result<(), limits_check::PipelineLayoutLimitsError>
{
-> Result<(), limits_check::PipelineLayoutLimitsError> {
limits_check::check_desc_against_limits(self, device.physical_device().limits())
}
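Not part of the diff — a hedged usage fragment. A caller with an existing `Device` could run the check like this; the module paths follow the `use` lines visible in this diff, but the exact public re-exports are an assumption, and only two error variants are matched explicitly.

```rust
// Sketch only: path and visibility of `limits_check` are assumed from this diff.
extern crate vulkano;

use vulkano::descriptor::pipeline_layout::PipelineLayoutDesc;
use vulkano::descriptor::pipeline_layout::limits_check::PipelineLayoutLimitsError;
use vulkano::device::Device;

fn validate_layout<D>(desc: &D, device: &Device) -> bool
    where D: PipelineLayoutDesc
{
    match desc.check_against_limits(device) {
        Ok(()) => true,
        // One of the variants defined above; the enum has many more.
        Err(PipelineLayoutLimitsError::MaxDescriptorSetsLimitExceeded { limit, requested }) => {
            println!("too many descriptor sets: {} requested, device limit is {}",
                     requested, limit);
            false
        },
        Err(err) => {
            println!("pipeline layout exceeds a device limit: {}", err);
            false
        },
    }
}
```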
@ -168,8 +168,8 @@ impl Device {
|
||||
///
|
||||
// TODO: return Arc<Queue> and handle synchronization in the Queue
|
||||
// TODO: should take the PhysicalDevice by value
|
||||
pub fn new<'a, I, Ext>(phys: PhysicalDevice, requested_features: &Features,
|
||||
extensions: Ext, queue_families: I)
|
||||
pub fn new<'a, I, Ext>(phys: PhysicalDevice, requested_features: &Features, extensions: Ext,
|
||||
queue_families: I)
|
||||
-> Result<(Arc<Device>, QueuesIter), DeviceCreationError>
|
||||
where I: IntoIterator<Item = (QueueFamily<'a>, f32)>,
|
||||
Ext: Into<RawDeviceExtensions>
|
||||
@ -296,27 +296,27 @@ impl Device {
|
||||
*const _
|
||||
});
|
||||
|
||||
let device = Arc::new(Device {
|
||||
instance: phys.instance().clone(),
|
||||
physical_device: phys.index(),
|
||||
device: device,
|
||||
vk: vk,
|
||||
standard_pool: Mutex::new(Weak::new()),
|
||||
standard_descriptor_pool: Mutex::new(Weak::new()),
|
||||
standard_command_pools: Mutex::new(Default::default()),
|
||||
features: Features {
|
||||
// Always enabled ; see above
|
||||
robust_buffer_access: true,
|
||||
.. requested_features.clone()
|
||||
},
|
||||
extensions: (&extensions).into(),
|
||||
active_queue_families: output_queues.iter()
|
||||
.map(|&(q, _)| q).collect(),
|
||||
allocation_count: Mutex::new(0),
|
||||
fence_pool: Mutex::new(Vec::new()),
|
||||
semaphore_pool: Mutex::new(Vec::new()),
|
||||
event_pool: Mutex::new(Vec::new()),
|
||||
});
|
||||
let device =
|
||||
Arc::new(Device {
|
||||
instance: phys.instance().clone(),
|
||||
physical_device: phys.index(),
|
||||
device: device,
|
||||
vk: vk,
|
||||
standard_pool: Mutex::new(Weak::new()),
|
||||
standard_descriptor_pool: Mutex::new(Weak::new()),
|
||||
standard_command_pools: Mutex::new(Default::default()),
|
||||
features: Features {
|
||||
// Always enabled ; see above
|
||||
robust_buffer_access: true,
|
||||
..requested_features.clone()
|
||||
},
|
||||
extensions: (&extensions).into(),
|
||||
active_queue_families: output_queues.iter().map(|&(q, _)| q).collect(),
|
||||
allocation_count: Mutex::new(0),
|
||||
fence_pool: Mutex::new(Vec::new()),
|
||||
semaphore_pool: Mutex::new(Vec::new()),
|
||||
event_pool: Mutex::new(Vec::new()),
|
||||
});
|
||||
|
||||
// Iterator for the produced queues.
|
||||
let output_queues = QueuesIter {
|
||||
@ -367,10 +367,12 @@ impl Device {
|
||||
/// > **Note**: Will return `-> impl ExactSizeIterator<Item = QueueFamily>` in the future.
|
||||
// TODO: ^
|
||||
#[inline]
|
||||
pub fn active_queue_families<'a>(&'a self) -> Box<ExactSizeIterator<Item = QueueFamily<'a>> + 'a> {
|
||||
pub fn active_queue_families<'a>(&'a self)
|
||||
-> Box<ExactSizeIterator<Item = QueueFamily<'a>> + 'a> {
|
||||
let physical_device = self.physical_device();
|
||||
Box::new(self.active_queue_families.iter()
|
||||
.map(move |&id| physical_device.queue_family_by_id(id).unwrap()))
|
||||
Box::new(self.active_queue_families
|
||||
.iter()
|
||||
.map(move |&id| physical_device.queue_family_by_id(id).unwrap()))
|
||||
}
|
||||
|
||||
/// Returns the features that are enabled in the device.
|
||||
|
@ -102,8 +102,8 @@
|
||||
//! // TODO: storage formats
|
||||
//!
|
||||
|
||||
use std::{error, fmt, mem};
|
||||
use std::vec::IntoIter as VecIntoIter;
|
||||
use std::{mem, error, fmt};
|
||||
|
||||
use half::f16;
|
||||
|
||||
@ -141,7 +141,9 @@ pub struct IncompatiblePixelsType;
|
||||
|
||||
impl error::Error for IncompatiblePixelsType {
|
||||
#[inline]
|
||||
fn description(&self) -> &str { "supplied pixels' type is incompatible with this format" }
|
||||
fn description(&self) -> &str {
|
||||
"supplied pixels' type is incompatible with this format"
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for IncompatiblePixelsType {
|
||||
@ -166,7 +168,9 @@ pub unsafe trait AcceptsPixels<T> {
|
||||
/// # Panics
|
||||
///
|
||||
/// May panic if `ensure_accepts` would not return `Ok(())`.
|
||||
fn rate(&self) -> u32 { 1 }
|
||||
fn rate(&self) -> u32 {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! formats {
|
||||
@ -780,7 +784,7 @@ impl FormatTy {
|
||||
FormatTy::Depth => true,
|
||||
FormatTy::Stencil => true,
|
||||
FormatTy::DepthStencil => true,
|
||||
_ => false
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -71,7 +71,12 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
|
||||
if ds == attachment_num {
|
||||
// Was normally checked by the render pass.
|
||||
debug_assert!(image.parent().has_depth() || image.parent().has_stencil());
|
||||
if !image.parent().inner().image.usage_depth_stencil_attachment() {
|
||||
if !image
|
||||
.parent()
|
||||
.inner()
|
||||
.image
|
||||
.usage_depth_stencil_attachment()
|
||||
{
|
||||
return Err(IncompatibleRenderPassAttachmentError::MissingDepthStencilAttachmentUsage);
|
||||
}
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ impl<F> AttachmentImage<F> {
|
||||
#[inline]
|
||||
pub fn multisampled_input_attachment(device: Arc<Device>, dimensions: [u32; 2], samples: u32,
|
||||
format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
@ -184,7 +184,7 @@ impl<F> AttachmentImage<F> {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled(device: Arc<Device>, dimensions: [u32; 2], format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
@ -200,7 +200,7 @@ impl<F> AttachmentImage<F> {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled_input_attachment(device: Arc<Device>, dimensions: [u32; 2], format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
@ -219,9 +219,8 @@ impl<F> AttachmentImage<F> {
|
||||
///
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32,
|
||||
format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
pub fn sampled_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
@ -237,9 +236,9 @@ impl<F> AttachmentImage<F> {
|
||||
///
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled_multisampled_input_attachment(device: Arc<Device>, dimensions: [u32; 2],
|
||||
samples: u32, format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
pub fn sampled_multisampled_input_attachment(
|
||||
device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
@ -275,7 +274,7 @@ impl<F> AttachmentImage<F> {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn transient_input_attachment(device: Arc<Device>, dimensions: [u32; 2], format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
@ -312,9 +311,9 @@ impl<F> AttachmentImage<F> {
|
||||
///
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn transient_multisampled_input_attachment(device: Arc<Device>, dimensions: [u32; 2],
|
||||
samples: u32, format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
pub fn transient_multisampled_input_attachment(
|
||||
device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
let base_usage = ImageUsage {
|
||||
|
@ -46,8 +46,8 @@ use memory::pool::MemoryPoolAlloc;
|
||||
use memory::pool::PotentialDedicatedAllocation;
|
||||
use memory::pool::StdMemoryPoolAlloc;
|
||||
use sync::AccessError;
|
||||
use sync::Sharing;
|
||||
use sync::NowFuture;
|
||||
use sync::Sharing;
|
||||
|
||||
/// Image whose purpose is to be used for read-only purposes. You can write to the image once,
|
||||
/// but then you must only ever read from it.
|
||||
@ -90,9 +90,9 @@ impl<F> ImmutableImage<F> {
|
||||
pub fn with_mipmaps<'a, I, M>(device: Arc<Device>, dimensions: Dimensions, format: F,
|
||||
mipmaps: M, queue_families: I)
|
||||
-> Result<Arc<ImmutableImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc,
|
||||
I: IntoIterator<Item = QueueFamily<'a>>,
|
||||
M: Into<MipmapsCount>
|
||||
where F: FormatDesc,
|
||||
I: IntoIterator<Item = QueueFamily<'a>>,
|
||||
M: Into<MipmapsCount>
|
||||
{
|
||||
let usage = ImageUsage {
|
||||
transfer_source: true, // for blits
|
||||
@ -101,7 +101,13 @@ impl<F> ImmutableImage<F> {
|
||||
..ImageUsage::none()
|
||||
};
|
||||
|
||||
let (image, _) = ImmutableImage::uninitialized(device, dimensions, format, mipmaps, usage, ImageLayout::ShaderReadOnlyOptimal, queue_families)?;
|
||||
let (image, _) = ImmutableImage::uninitialized(device,
|
||||
dimensions,
|
||||
format,
|
||||
mipmaps,
|
||||
usage,
|
||||
ImageLayout::ShaderReadOnlyOptimal,
|
||||
queue_families)?;
|
||||
image.initialized.store(true, Ordering::Relaxed); // Allow uninitialized access for backwards compatibility
|
||||
Ok(image)
|
||||
}
|
||||
@ -109,9 +115,10 @@ impl<F> ImmutableImage<F> {
|
||||
/// Builds an uninitialized immutable image.
|
||||
///
|
||||
/// Returns two things: the image, and a special access that should be used for the initial upload to the image.
|
||||
pub fn uninitialized<'a, I, M>(device: Arc<Device>, dimensions: Dimensions, format: F,
|
||||
mipmaps: M, usage: ImageUsage, layout: ImageLayout, queue_families: I)
|
||||
-> Result<(Arc<ImmutableImage<F>>, ImmutableImageInitialization<F>), ImageCreationError>
|
||||
pub fn uninitialized<'a, I, M>(
|
||||
device: Arc<Device>, dimensions: Dimensions, format: F, mipmaps: M, usage: ImageUsage,
|
||||
layout: ImageLayout, queue_families: I)
|
||||
-> Result<(Arc<ImmutableImage<F>>, ImmutableImageInitialization<F>), ImageCreationError>
|
||||
where F: FormatDesc,
|
||||
I: IntoIterator<Item = QueueFamily<'a>>,
|
||||
M: Into<MipmapsCount>
|
||||
@ -162,14 +169,14 @@ impl<F> ImmutableImage<F> {
|
||||
};
|
||||
|
||||
let image = Arc::new(ImmutableImage {
|
||||
image: image,
|
||||
view: view,
|
||||
memory: mem,
|
||||
dimensions: dimensions,
|
||||
format: format,
|
||||
initialized: AtomicBool::new(false),
|
||||
layout: layout,
|
||||
});
|
||||
image: image,
|
||||
view: view,
|
||||
memory: mem,
|
||||
dimensions: dimensions,
|
||||
format: format,
|
||||
initialized: AtomicBool::new(false),
|
||||
layout: layout,
|
||||
});
|
||||
|
||||
let init = ImmutableImageInitialization {
|
||||
image: image.clone(),
|
||||
@ -184,12 +191,13 @@ impl<F> ImmutableImage<F> {
|
||||
/// TODO: Support mipmaps
|
||||
#[inline]
|
||||
pub fn from_iter<P, I>(iter: I, dimensions: Dimensions, format: F, queue: Arc<Queue>)
|
||||
-> Result<(Arc<Self>, CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
|
||||
-> Result<(Arc<Self>,
|
||||
CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
|
||||
ImageCreationError>
|
||||
where P: Send + Sync + Clone + 'static,
|
||||
F: FormatDesc + AcceptsPixels<P> + 'static + Send + Sync,
|
||||
I: ExactSizeIterator<Item = P>,
|
||||
Format: AcceptsPixels<P>,
|
||||
Format: AcceptsPixels<P>
|
||||
{
|
||||
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(),
|
||||
BufferUsage::transfer_source(),
|
||||
@ -201,24 +209,41 @@ impl<F> ImmutableImage<F> {
|
||||
///
|
||||
/// TODO: Support mipmaps
|
||||
pub fn from_buffer<B, P>(source: B, dimensions: Dimensions, format: F, queue: Arc<Queue>)
|
||||
-> Result<(Arc<Self>, CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
|
||||
-> Result<(Arc<Self>,
|
||||
CommandBufferExecFuture<NowFuture, AutoCommandBuffer>),
|
||||
ImageCreationError>
|
||||
where B: BufferAccess + TypedBufferAccess<Content = [P]> + 'static + Clone + Send + Sync,
|
||||
P: Send + Sync + Clone + 'static,
|
||||
F: FormatDesc + AcceptsPixels<P> + 'static + Send + Sync,
|
||||
Format: AcceptsPixels<P>,
|
||||
Format: AcceptsPixels<P>
|
||||
{
|
||||
let usage = ImageUsage { transfer_destination: true, sampled: true, ..ImageUsage::none() };
|
||||
let usage = ImageUsage {
|
||||
transfer_destination: true,
|
||||
sampled: true,
|
||||
..ImageUsage::none()
|
||||
};
|
||||
let layout = ImageLayout::ShaderReadOnlyOptimal;
|
||||
|
||||
let (buffer, init) = ImmutableImage::uninitialized(source.device().clone(),
|
||||
dimensions, format,
|
||||
MipmapsCount::One, usage, layout,
|
||||
source.device().active_queue_families())?;
|
||||
let (buffer, init) =
|
||||
ImmutableImage::uninitialized(source.device().clone(),
|
||||
dimensions,
|
||||
format,
|
||||
MipmapsCount::One,
|
||||
usage,
|
||||
layout,
|
||||
source.device().active_queue_families())?;
|
||||
|
||||
let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?
|
||||
.copy_buffer_to_image_dimensions(source, init, [0, 0, 0], dimensions.width_height_depth(), 0, dimensions.array_layers_with_cube(), 0).unwrap()
|
||||
.build().unwrap();
|
||||
.copy_buffer_to_image_dimensions(source,
|
||||
init,
|
||||
[0, 0, 0],
|
||||
dimensions.width_height_depth(),
|
||||
0,
|
||||
dimensions.array_layers_with_cube(),
|
||||
0)
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let future = match cb.execute(queue) {
|
||||
Ok(f) => f,
|
||||
@ -244,7 +269,7 @@ impl<F, A> ImmutableImage<F, A> {
|
||||
}
|
||||
|
||||
unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
|
||||
where F: 'static + Send + Sync,
|
||||
where F: 'static + Send + Sync
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> ImageInner {
|
||||
@ -286,14 +311,16 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn increase_gpu_lock(&self) {}
|
||||
unsafe fn increase_gpu_lock(&self) {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn unlock(&self) {}
|
||||
unsafe fn unlock(&self) {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
|
||||
where F: 'static + Send + Sync,
|
||||
where F: 'static + Send + Sync
|
||||
{
|
||||
#[inline]
|
||||
fn matches_format(&self) -> bool {
|
||||
@ -302,7 +329,7 @@ unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
|
||||
}
|
||||
|
||||
unsafe impl<F: 'static, A> ImageViewAccess for ImmutableImage<F, A>
|
||||
where F: 'static + Send + Sync,
|
||||
where F: 'static + Send + Sync
|
||||
{
|
||||
#[inline]
|
||||
fn parent(&self) -> &ImageAccess {
|
||||
@ -346,7 +373,7 @@ unsafe impl<F: 'static, A> ImageViewAccess for ImmutableImage<F, A>
|
||||
}
|
||||
|
||||
unsafe impl<F, A> ImageAccess for ImmutableImageInitialization<F, A>
|
||||
where F: 'static + Send + Sync,
|
||||
where F: 'static + Send + Sync
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> ImageInner {
|
||||
|
@ -482,35 +482,47 @@ impl ImageDimensions {
|
||||
}
|
||||
|
||||
Some(match *self {
|
||||
ImageDimensions::Dim1d { width, array_layers } => {
|
||||
debug_assert_ne!(width, 0);
|
||||
ImageDimensions::Dim1d {
|
||||
array_layers: array_layers,
|
||||
width: (((width - 1) >> level) + 1).next_power_of_two(),
|
||||
}
|
||||
},
|
||||
ImageDimensions::Dim1d {
|
||||
width,
|
||||
array_layers,
|
||||
} => {
|
||||
debug_assert_ne!(width, 0);
|
||||
ImageDimensions::Dim1d {
|
||||
array_layers: array_layers,
|
||||
width: (((width - 1) >> level) + 1).next_power_of_two(),
|
||||
}
|
||||
},
|
||||
|
||||
ImageDimensions::Dim2d { width, height, array_layers, cubemap_compatible } => {
|
||||
debug_assert_ne!(width, 0);
|
||||
debug_assert_ne!(height, 0);
|
||||
ImageDimensions::Dim2d {
|
||||
width: (((width - 1) >> level) + 1).next_power_of_two(),
|
||||
height: (((height - 1) >> level) + 1).next_power_of_two(),
|
||||
array_layers: array_layers,
|
||||
cubemap_compatible: cubemap_compatible,
|
||||
}
|
||||
},
|
||||
ImageDimensions::Dim2d {
|
||||
width,
|
||||
height,
|
||||
array_layers,
|
||||
cubemap_compatible,
|
||||
} => {
|
||||
debug_assert_ne!(width, 0);
|
||||
debug_assert_ne!(height, 0);
|
||||
ImageDimensions::Dim2d {
|
||||
width: (((width - 1) >> level) + 1).next_power_of_two(),
|
||||
height: (((height - 1) >> level) + 1).next_power_of_two(),
|
||||
array_layers: array_layers,
|
||||
cubemap_compatible: cubemap_compatible,
|
||||
}
|
||||
},
|
||||
|
||||
ImageDimensions::Dim3d { width, height, depth } => {
|
||||
debug_assert_ne!(width, 0);
|
||||
debug_assert_ne!(height, 0);
|
||||
ImageDimensions::Dim3d {
|
||||
width: (((width - 1) >> level) + 1).next_power_of_two(),
|
||||
height: (((height - 1) >> level) + 1).next_power_of_two(),
|
||||
depth: (((depth - 1) >> level) + 1).next_power_of_two(),
|
||||
}
|
||||
},
|
||||
})
|
||||
ImageDimensions::Dim3d {
|
||||
width,
|
||||
height,
|
||||
depth,
|
||||
} => {
|
||||
debug_assert_ne!(width, 0);
|
||||
debug_assert_ne!(height, 0);
|
||||
ImageDimensions::Dim3d {
|
||||
width: (((width - 1) >> level) + 1).next_power_of_two(),
|
||||
height: (((height - 1) >> level) + 1).next_power_of_two(),
|
||||
depth: (((depth - 1) >> level) + 1).next_power_of_two(),
|
||||
}
},
})
}
}
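Not part of the diff — a worked check of the per-level extent formula `(((extent - 1) >> level) + 1).next_power_of_two()` used in every arm above (level 0 is special-cased by the real method and returns the original dimensions). The values reproduce part of the 283 x 175 progression asserted in the tests below.

```rust
// The same per-level extent formula as mipmap_dimensions(), applied to plain u32s.
fn mip_extent(extent: u32, level: u32) -> u32 {
    debug_assert_ne!(extent, 0);
    (((extent - 1) >> level) + 1).next_power_of_two()
}

fn main() {
    // Matches the 283 x 175 expectations in the `mipmap_dimensions` test below.
    assert_eq!((mip_extent(283, 1), mip_extent(175, 1)), (256, 128));
    assert_eq!((mip_extent(283, 2), mip_extent(175, 2)), (128, 64));
    assert_eq!((mip_extent(283, 8), mip_extent(175, 8)), (2, 1));
    assert_eq!((mip_extent(283, 9), mip_extent(175, 9)), (1, 1));
}
```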
@ -520,29 +532,103 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn max_mipmaps() {
|
||||
let dims = ImageDimensions::Dim2d { width: 2, height: 1, cubemap_compatible: false, array_layers: 1 };
|
||||
let dims = ImageDimensions::Dim2d {
|
||||
width: 2,
|
||||
height: 1,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
};
|
||||
assert_eq!(dims.max_mipmaps(), 2);
|
||||
|
||||
let dims = ImageDimensions::Dim2d { width: 2, height: 3, cubemap_compatible: false, array_layers: 1 };
|
||||
let dims = ImageDimensions::Dim2d {
|
||||
width: 2,
|
||||
height: 3,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
};
|
||||
assert_eq!(dims.max_mipmaps(), 3);
|
||||
|
||||
let dims = ImageDimensions::Dim2d { width: 512, height: 512, cubemap_compatible: false, array_layers: 1 };
|
||||
let dims = ImageDimensions::Dim2d {
|
||||
width: 512,
|
||||
height: 512,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
};
|
||||
assert_eq!(dims.max_mipmaps(), 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mipmap_dimensions() {
|
||||
let dims = ImageDimensions::Dim2d { width: 283, height: 175, cubemap_compatible: false, array_layers: 1 };
|
||||
let dims = ImageDimensions::Dim2d {
|
||||
width: 283,
|
||||
height: 175,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
};
|
||||
assert_eq!(dims.mipmap_dimensions(0), Some(dims));
|
||||
assert_eq!(dims.mipmap_dimensions(1), Some(ImageDimensions::Dim2d { width: 256, height: 128, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(2), Some(ImageDimensions::Dim2d { width: 128, height: 64, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(3), Some(ImageDimensions::Dim2d { width: 64, height: 32, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(4), Some(ImageDimensions::Dim2d { width: 32, height: 16, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(5), Some(ImageDimensions::Dim2d { width: 16, height: 8, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(6), Some(ImageDimensions::Dim2d { width: 8, height: 4, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(7), Some(ImageDimensions::Dim2d { width: 4, height: 2, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(8), Some(ImageDimensions::Dim2d { width: 2, height: 1, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(9), Some(ImageDimensions::Dim2d { width: 1, height: 1, cubemap_compatible: false, array_layers: 1 }));
|
||||
assert_eq!(dims.mipmap_dimensions(1),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 256,
|
||||
height: 128,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(2),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 128,
|
||||
height: 64,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(3),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 64,
|
||||
height: 32,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(4),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 32,
|
||||
height: 16,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(5),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 16,
|
||||
height: 8,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(6),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 8,
|
||||
height: 4,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(7),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 4,
|
||||
height: 2,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(8),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 2,
|
||||
height: 1,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(9),
|
||||
Some(ImageDimensions::Dim2d {
|
||||
width: 1,
|
||||
height: 1,
|
||||
cubemap_compatible: false,
|
||||
array_layers: 1,
|
||||
}));
|
||||
assert_eq!(dims.mipmap_dimensions(10), None);
|
||||
}
|
||||
}
|
||||
|
@ -30,8 +30,8 @@ use image::traits::ImageContent;
|
||||
use image::traits::ImageViewAccess;
|
||||
use instance::QueueFamily;
|
||||
use memory::DedicatedAlloc;
|
||||
use memory::pool::AllocLayout;
|
||||
use memory::pool::AllocFromRequirementsFilter;
|
||||
use memory::pool::AllocLayout;
|
||||
use memory::pool::MappingRequirement;
|
||||
use memory::pool::MemoryPool;
|
||||
use memory::pool::MemoryPoolAlloc;
|
||||
|
@ -29,8 +29,8 @@ use image::ImageUsage;
|
||||
use image::MipmapsCount;
|
||||
use image::ViewType;
|
||||
use memory::DeviceMemory;
|
||||
use memory::MemoryRequirements;
|
||||
use memory::DeviceMemoryAllocError;
|
||||
use memory::MemoryRequirements;
|
||||
use sync::Sharing;
|
||||
|
||||
use Error;
|
||||
@ -506,18 +506,20 @@ impl UnsafeImage {
|
||||
|
||||
let mut output2 = if device.loaded_extensions().khr_dedicated_allocation {
|
||||
Some(vk::MemoryDedicatedRequirementsKHR {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
|
||||
pNext: ptr::null(),
|
||||
prefersDedicatedAllocation: mem::uninitialized(),
|
||||
requiresDedicatedAllocation: mem::uninitialized(),
|
||||
})
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
|
||||
pNext: ptr::null(),
|
||||
prefersDedicatedAllocation: mem::uninitialized(),
|
||||
requiresDedicatedAllocation: mem::uninitialized(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut output = vk::MemoryRequirements2KHR {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
|
||||
pNext: output2.as_mut().map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
|
||||
pNext: output2
|
||||
.as_mut()
|
||||
.map(|o| o as *mut vk::MemoryDedicatedRequirementsKHR)
|
||||
.unwrap_or(ptr::null_mut()) as *mut _,
|
||||
memoryRequirements: mem::uninitialized(),
|
||||
};
|
||||
|
@ -11,9 +11,9 @@ use std::collections::HashSet;
|
||||
use std::error;
|
||||
use std::ffi::{CStr, CString};
|
||||
use std::fmt;
|
||||
use std::iter::FromIterator;
|
||||
use std::ptr;
|
||||
use std::str;
|
||||
use std::iter::FromIterator;
|
||||
|
||||
use Error;
|
||||
use OomError;
|
||||
|
@ -94,8 +94,10 @@ pub struct Instance {
|
||||
}
|
||||
|
||||
// TODO: fix the underlying cause instead
|
||||
impl ::std::panic::UnwindSafe for Instance {}
|
||||
impl ::std::panic::RefUnwindSafe for Instance {}
|
||||
impl ::std::panic::UnwindSafe for Instance {
|
||||
}
|
||||
impl ::std::panic::RefUnwindSafe for Instance {
|
||||
}
|
||||
|
||||
impl Instance {
|
||||
/// Initializes a new instance of Vulkan.
|
||||
@ -133,14 +135,16 @@ impl Instance {
|
||||
.map(|&layer| CString::new(layer).unwrap())
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
|
||||
Instance::new_inner(app_infos, extensions.into(), layers,
|
||||
Instance::new_inner(app_infos,
|
||||
extensions.into(),
|
||||
layers,
|
||||
OwnedOrRef::Ref(loader::auto_loader()?))
|
||||
}
|
||||
|
||||
/// Same as `new`, but allows specifying a loader where to load Vulkan from.
|
||||
pub fn with_loader<'a, L, Ext>(loader: FunctionPointers<Box<Loader + Send + Sync>>,
|
||||
app_infos: Option<&ApplicationInfo>, extensions: Ext,
|
||||
layers: L) -> Result<Arc<Instance>, InstanceCreationError>
|
||||
app_infos: Option<&ApplicationInfo>, extensions: Ext, layers: L)
|
||||
-> Result<Arc<Instance>, InstanceCreationError>
|
||||
where L: IntoIterator<Item = &'a &'a str>,
|
||||
Ext: Into<RawInstanceExtensions>
|
||||
{
|
||||
@ -149,7 +153,9 @@ impl Instance {
|
||||
.map(|&layer| CString::new(layer).unwrap())
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
|
||||
Instance::new_inner(app_infos, extensions.into(), layers,
|
||||
Instance::new_inner(app_infos,
|
||||
extensions.into(),
|
||||
layers,
|
||||
OwnedOrRef::Owned(loader))
|
||||
}
|
||||
|
||||
@ -516,7 +522,7 @@ impl<'a> ApplicationInfo<'a> {
|
||||
/// - Panics if the required environment variables are missing, which happens if the project
|
||||
/// wasn't built by Cargo.
|
||||
///
|
||||
#[deprecated(note="Please use the `app_info_from_cargo_toml!` macro instead")]
|
||||
#[deprecated(note = "Please use the `app_info_from_cargo_toml!` macro instead")]
|
||||
pub fn from_cargo_toml() -> ApplicationInfo<'a> {
|
||||
let version = Version {
|
||||
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
|
||||
|
@ -21,6 +21,7 @@
|
||||
//! By default vulkano will use the `auto_loader()` function, which tries to automatically load
|
||||
//! a Vulkan implementation from the system.
|
||||
|
||||
use shared_library;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
@ -28,7 +29,6 @@ use std::ops::Deref;
|
||||
use std::os::raw::c_char;
|
||||
use std::os::raw::c_void;
|
||||
use std::path::Path;
|
||||
use shared_library;
|
||||
|
||||
use SafeDeref;
|
||||
use vk;
|
||||
@ -57,7 +57,7 @@ unsafe impl<T> Loader for T
|
||||
pub struct DynamicLibraryLoader {
|
||||
vk_lib: shared_library::dynamic_library::DynamicLibrary,
|
||||
get_proc_addr: extern "system" fn(instance: vk::Instance, pName: *const c_char)
|
||||
-> extern "system" fn() -> (),
|
||||
-> extern "system" fn() -> (),
|
||||
}
|
||||
|
||||
impl DynamicLibraryLoader {
|
||||
@ -75,23 +75,26 @@ impl DynamicLibraryLoader {
|
||||
.map_err(LoadingError::LibraryLoadFailure)?;
|
||||
|
||||
let get_proc_addr = {
|
||||
let ptr: *mut c_void = vk_lib
|
||||
.symbol("vkGetInstanceProcAddr")
|
||||
.map_err(|_| LoadingError::MissingEntryPoint("vkGetInstanceProcAddr".to_owned()))?;
|
||||
let ptr: *mut c_void =
|
||||
vk_lib
|
||||
.symbol("vkGetInstanceProcAddr")
|
||||
.map_err(|_| {
|
||||
LoadingError::MissingEntryPoint("vkGetInstanceProcAddr".to_owned())
|
||||
})?;
|
||||
mem::transmute(ptr)
|
||||
};
|
||||
|
||||
Ok(DynamicLibraryLoader {
vk_lib,
get_proc_addr,
})
vk_lib,
get_proc_addr,
})
}
}
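Not part of the diff — a usage sketch for the loader type defined above: loading a Vulkan library from an explicit path instead of going through `auto_loader()`. The constructor, its `unsafe` contract and the `LibraryLoadFailure` variant appear in this file; the `vulkano::instance::loader` module path and the library file name are assumptions.

```rust
extern crate vulkano;

use vulkano::instance::loader::{DynamicLibraryLoader, LoadingError};

fn main() {
    // Unsafe because the library handed in must really be a Vulkan implementation
    // exposing vkGetInstanceProcAddr.
    match unsafe { DynamicLibraryLoader::new("libvulkan.so.1") } {
        Ok(_loader) => println!("loaded a Vulkan implementation"),
        Err(LoadingError::LibraryLoadFailure(_)) => println!("could not open the library"),
        Err(_) => println!("some other loading error"),
    }
}
```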
unsafe impl Loader for DynamicLibraryLoader {
|
||||
#[inline]
|
||||
fn get_instance_proc_addr(&self, instance: vk::Instance, name: *const c_char)
|
||||
-> extern "system" fn() -> () {
|
||||
-> extern "system" fn() -> () {
|
||||
(self.get_proc_addr)(instance, name)
|
||||
}
|
||||
}
|
||||
@ -107,11 +110,7 @@ impl<L> FunctionPointers<L> {
|
||||
pub fn new(loader: L) -> FunctionPointers<L>
|
||||
where L: Loader
|
||||
{
|
||||
let entry_points = vk::EntryPoints::load(|name| {
|
||||
unsafe {
|
||||
mem::transmute(loader.get_instance_proc_addr(0, name.as_ptr()))
|
||||
}
|
||||
});
|
||||
let entry_points = vk::EntryPoints::load(|name| unsafe { mem::transmute(loader.get_instance_proc_addr(0, name.as_ptr())) });
|
||||
|
||||
FunctionPointers {
|
||||
loader,
|
||||
@ -169,7 +168,8 @@ macro_rules! statically_linked_vulkan_loader {
|
||||
/// This function tries to auto-guess where to find the Vulkan implementation, and loads it in a
|
||||
/// `lazy_static!`. The content of the lazy_static is then returned, or an error if we failed to
|
||||
/// load Vulkan.
|
||||
pub fn auto_loader()
|
||||
pub fn auto_loader(
|
||||
)
|
||||
-> Result<&'static FunctionPointers<Box<Loader + Send + Sync>>, LoadingError>
|
||||
{
|
||||
#[cfg(any(target_os = "macos", target_os = "ios"))]
|
||||
@ -194,9 +194,7 @@ pub fn auto_loader()
|
||||
Path::new("libvulkan.so")
|
||||
}
|
||||
|
||||
let loader = unsafe {
|
||||
DynamicLibraryLoader::new(get_path())?
|
||||
};
|
||||
let loader = unsafe { DynamicLibraryLoader::new(get_path())? };
|
||||
|
||||
Ok(Box::new(loader))
|
||||
}
|
||||
@ -209,7 +207,7 @@ pub fn auto_loader()
|
||||
|
||||
match DEFAULT_LOADER.deref() {
|
||||
&Ok(ref ptr) => Ok(ptr),
|
||||
&Err(ref err) => Err(err.clone())
|
||||
&Err(ref err) => Err(err.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
@ -263,7 +261,7 @@ mod tests {
|
||||
unsafe {
|
||||
match DynamicLibraryLoader::new("_non_existing_library.void") {
|
||||
Err(LoadingError::LibraryLoadFailure(_)) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -7,9 +7,9 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::error;
|
||||
use std::ops::Deref;
|
||||
use std::ops::DerefMut;
|
||||
use std::ops::Range;
|
||||
@ -17,8 +17,8 @@ use std::os::raw::c_void;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use OomError;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use device::Device;
|
||||
@ -93,7 +93,7 @@ impl DeviceMemory {
|
||||
let physical_device = device.physical_device();
|
||||
let mut allocation_count = device.allocation_count().lock().expect("Poisoned mutex");
|
||||
if *allocation_count >= physical_device.limits().max_memory_allocation_count() {
|
||||
return Err(DeviceMemoryAllocError::TooManyObjects)
|
||||
return Err(DeviceMemoryAllocError::TooManyObjects);
|
||||
}
|
||||
let vk = device.pointers();
|
||||
|
||||
@ -102,23 +102,23 @@ impl DeviceMemory {
|
||||
match resource {
|
||||
DedicatedAlloc::Buffer(buffer) => {
|
||||
Some(vk::MemoryDedicatedAllocateInfoKHR {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
|
||||
pNext: ptr::null(),
|
||||
image: 0,
|
||||
buffer: buffer.internal_object(),
|
||||
})
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
|
||||
pNext: ptr::null(),
|
||||
image: 0,
|
||||
buffer: buffer.internal_object(),
|
||||
})
|
||||
},
|
||||
DedicatedAlloc::Image(image) => {
|
||||
Some(vk::MemoryDedicatedAllocateInfoKHR {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
|
||||
pNext: ptr::null(),
|
||||
image: image.internal_object(),
|
||||
buffer: 0,
|
||||
})
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
|
||||
pNext: ptr::null(),
|
||||
image: image.internal_object(),
|
||||
buffer: 0,
|
||||
})
|
||||
},
|
||||
DedicatedAlloc::None => {
|
||||
None
|
||||
}
|
||||
},
|
||||
}
|
||||
} else {
|
||||
None
|
||||
@ -126,7 +126,10 @@ impl DeviceMemory {
|
||||
|
||||
let infos = vk::MemoryAllocateInfo {
|
||||
sType: vk::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
|
||||
pNext: dedicated_alloc_info.as_ref().map(|i| i as *const vk::MemoryDedicatedAllocateInfoKHR).unwrap_or(ptr::null()) as *const _,
|
||||
pNext: dedicated_alloc_info
|
||||
.as_ref()
|
||||
.map(|i| i as *const vk::MemoryDedicatedAllocateInfoKHR)
|
||||
.unwrap_or(ptr::null()) as *const _,
|
||||
allocationSize: size as u64,
|
||||
memoryTypeIndex: memory_type.id(),
|
||||
};
|
||||
@ -238,7 +241,10 @@ impl Drop for DeviceMemory {
|
||||
unsafe {
|
||||
let vk = self.device.pointers();
|
||||
vk.FreeMemory(self.device.internal_object(), self.memory, ptr::null());
|
||||
let mut allocation_count = self.device.allocation_count().lock().expect("Poisoned mutex");
|
||||
let mut allocation_count = self.device
|
||||
.allocation_count()
|
||||
.lock()
|
||||
.expect("Poisoned mutex");
|
||||
*allocation_count -= 1;
|
||||
}
|
||||
}
|
||||
@ -467,7 +473,8 @@ impl error::Error for DeviceMemoryAllocError {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
DeviceMemoryAllocError::OomError(_) => "not enough memory available",
|
||||
DeviceMemoryAllocError::TooManyObjects => "the maximum number of allocations has been exceeded",
|
||||
DeviceMemoryAllocError::TooManyObjects =>
|
||||
"the maximum number of allocations has been exceeded",
|
||||
DeviceMemoryAllocError::MemoryMapFailed => "memory map failed",
|
||||
}
|
||||
}
|
||||
@ -526,8 +533,8 @@ mod tests {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let mem_ty = device.physical_device().memory_types().next().unwrap();
|
||||
assert_should_panic!({
|
||||
let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0);
|
||||
});
|
||||
let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
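To make the allocation-count guard in the `DeviceMemory::alloc` hunks above concrete, here is a hedged sketch of a single raw allocation; the `vulkano::memory` paths are assumptions, while the calls themselves mirror the tests in this file.

```rust
// Hedged sketch: one raw device-memory allocation, mirroring the test code
// above. `device` is an Arc<Device> created elsewhere.
use std::sync::Arc;
use vulkano::device::Device;
use vulkano::memory::{DeviceMemory, DeviceMemoryAllocError};

fn alloc_one_kib(device: Arc<Device>) -> Result<DeviceMemory, DeviceMemoryAllocError> {
    // Pick the first reported memory type; real code would filter by flags.
    let mem_ty = device.physical_device().memory_types().next().unwrap();

    // Every successful allocation bumps the per-device counter that is
    // compared against max_memory_allocation_count(); once the limit is hit,
    // this returns DeviceMemoryAllocError::TooManyObjects.
    DeviceMemory::alloc(device.clone(), mem_ty, 1024)
}
```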
@ -95,8 +95,8 @@ use vk;
|
||||
|
||||
pub use self::device_memory::CpuAccess;
|
||||
pub use self::device_memory::DeviceMemory;
|
||||
pub use self::device_memory::MappedDeviceMemory;
|
||||
pub use self::device_memory::DeviceMemoryAllocError;
|
||||
pub use self::device_memory::MappedDeviceMemory;
|
||||
pub use self::pool::MemoryPool;
|
||||
|
||||
mod device_memory;
|
||||
|
@ -16,8 +16,8 @@ use device::Device;
|
||||
use instance::Instance;
|
||||
use instance::MemoryType;
|
||||
use memory::DeviceMemory;
|
||||
use memory::MappedDeviceMemory;
|
||||
use memory::DeviceMemoryAllocError;
|
||||
use memory::MappedDeviceMemory;
|
||||
|
||||
/// Memory pool that operates on a given memory type.
|
||||
#[derive(Debug)]
|
||||
|
@ -11,9 +11,9 @@ use device::DeviceOwned;
|
||||
use instance::MemoryType;
|
||||
use memory::DedicatedAlloc;
|
||||
use memory::DeviceMemory;
|
||||
use memory::DeviceMemoryAllocError;
|
||||
use memory::MappedDeviceMemory;
|
||||
use memory::MemoryRequirements;
|
||||
use memory::DeviceMemoryAllocError;
|
||||
|
||||
pub use self::host_visible::StdHostVisibleMemoryTypePool;
|
||||
pub use self::host_visible::StdHostVisibleMemoryTypePoolAlloc;
|
||||
@ -53,7 +53,8 @@ pub unsafe trait MemoryPool: DeviceOwned {
|
||||
/// - Panics if `alignment` is 0.
|
||||
///
|
||||
fn alloc_generic(&self, ty: MemoryType, size: usize, alignment: usize, layout: AllocLayout,
|
||||
map: MappingRequirement) -> Result<Self::Alloc, DeviceMemoryAllocError>;
|
||||
map: MappingRequirement)
|
||||
-> Result<Self::Alloc, DeviceMemoryAllocError>;
|
||||
|
||||
/// Chooses a memory type and allocates memory from it.
|
||||
///
|
||||
@ -84,9 +85,10 @@ pub unsafe trait MemoryPool: DeviceOwned {
|
||||
/// - Panics if `size` is 0.
|
||||
/// - Panics if `alignment` is 0.
|
||||
///
|
||||
fn alloc_from_requirements<F>(&self, requirements: &MemoryRequirements, layout: AllocLayout,
|
||||
map: MappingRequirement, dedicated: DedicatedAlloc, mut filter: F)
|
||||
-> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
|
||||
fn alloc_from_requirements<F>(
|
||||
&self, requirements: &MemoryRequirements, layout: AllocLayout, map: MappingRequirement,
|
||||
dedicated: DedicatedAlloc, mut filter: F)
|
||||
-> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
|
||||
where F: FnMut(MemoryType) -> AllocFromRequirementsFilter
|
||||
{
|
||||
// Choose a suitable memory type.
|
||||
@ -110,33 +112,44 @@ pub unsafe trait MemoryPool: DeviceOwned {
|
||||
.filter(|&(t, _)| (requirements.memory_type_bits & (1 << t.id())) != 0)
|
||||
.filter(|&(t, rq)| filter(t) == rq)
|
||||
.next()
|
||||
.expect("Couldn't find a memory type to allocate from").0
|
||||
.expect("Couldn't find a memory type to allocate from")
|
||||
.0
|
||||
};
|
||||
|
||||
// Redirect to `self.alloc_generic` if we don't perform a dedicated allocation.
|
||||
if !requirements.prefer_dedicated ||
|
||||
!self.device().loaded_extensions().khr_dedicated_allocation
|
||||
{
|
||||
let alloc = self.alloc_generic(mem_ty, requirements.size, requirements.alignment,
|
||||
layout, map)?;
|
||||
let alloc = self.alloc_generic(mem_ty,
|
||||
requirements.size,
|
||||
requirements.alignment,
|
||||
layout,
|
||||
map)?;
|
||||
return Ok(alloc.into());
|
||||
}
|
||||
if let DedicatedAlloc::None = dedicated {
|
||||
let alloc = self.alloc_generic(mem_ty, requirements.size, requirements.alignment,
|
||||
layout, map)?;
|
||||
let alloc = self.alloc_generic(mem_ty,
|
||||
requirements.size,
|
||||
requirements.alignment,
|
||||
layout,
|
||||
map)?;
|
||||
return Ok(alloc.into());
|
||||
}
|
||||
|
||||
// If we reach here, then we perform a dedicated alloc.
|
||||
match map {
|
||||
MappingRequirement::Map => {
|
||||
let mem = DeviceMemory::dedicated_alloc_and_map(self.device().clone(), mem_ty,
|
||||
requirements.size, dedicated)?;
|
||||
let mem = DeviceMemory::dedicated_alloc_and_map(self.device().clone(),
|
||||
mem_ty,
|
||||
requirements.size,
|
||||
dedicated)?;
|
||||
Ok(PotentialDedicatedAllocation::DedicatedMapped(mem))
|
||||
},
|
||||
MappingRequirement::DoNotMap => {
|
||||
let mem = DeviceMemory::dedicated_alloc(self.device().clone(), mem_ty,
|
||||
requirements.size, dedicated)?;
|
||||
let mem = DeviceMemory::dedicated_alloc(self.device().clone(),
|
||||
mem_ty,
|
||||
requirements.size,
|
||||
dedicated)?;
|
||||
Ok(PotentialDedicatedAllocation::Dedicated(mem))
|
||||
},
|
||||
}
|
||||
|
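As a usage illustration of `alloc_from_requirements`, the following hedged sketch allocates mapped memory through any pool, preferring host-visible memory types. The module paths and the `AllocFromRequirementsFilter` / `AllocLayout` variant names are assumptions; the method signature itself is the one shown above.

```rust
// Hedged sketch: driving MemoryPool::alloc_from_requirements with a filter
// closure. Variant names (Preferred/Allowed, Linear) are assumptions.
use vulkano::memory::pool::{AllocFromRequirementsFilter, AllocLayout, MappingRequirement,
                            MemoryPool, PotentialDedicatedAllocation};
use vulkano::memory::{DedicatedAlloc, DeviceMemoryAllocError, MemoryRequirements};

fn alloc_mapped<P>(pool: &P, reqs: &MemoryRequirements)
                   -> Result<PotentialDedicatedAllocation<P::Alloc>, DeviceMemoryAllocError>
    where P: MemoryPool
{
    pool.alloc_from_requirements(reqs,
                                 AllocLayout::Linear,
                                 MappingRequirement::Map,
                                 // No dedicated image or buffer: the call falls
                                 // back to alloc_generic, as the body above shows.
                                 DedicatedAlloc::None,
                                 |ty| if ty.is_host_visible() {
                                     AllocFromRequirementsFilter::Preferred
                                 } else {
                                     AllocFromRequirementsFilter::Allowed
                                 })
}
```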
@ -18,8 +18,8 @@ use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use instance::MemoryType;
|
||||
use memory::DeviceMemory;
|
||||
use memory::MappedDeviceMemory;
|
||||
use memory::DeviceMemoryAllocError;
|
||||
use memory::MappedDeviceMemory;
|
||||
use memory::pool::AllocLayout;
|
||||
use memory::pool::MappingRequirement;
|
||||
use memory::pool::MemoryPool;
|
||||
@ -34,7 +34,8 @@ pub struct StdMemoryPool {
|
||||
device: Arc<Device>,
|
||||
|
||||
// For each memory type index, stores the associated pool.
|
||||
pools: Mutex<HashMap<(u32, AllocLayout, MappingRequirement), Pool, BuildHasherDefault<FnvHasher>>>,
|
||||
pools:
|
||||
Mutex<HashMap<(u32, AllocLayout, MappingRequirement), Pool, BuildHasherDefault<FnvHasher>>>,
|
||||
}
|
||||
|
||||
impl StdMemoryPool {
|
||||
@ -86,15 +87,14 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
|
||||
|
||||
Entry::Vacant(entry) => {
|
||||
if memory_type_host_visible {
|
||||
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(),
|
||||
memory_type);
|
||||
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
|
||||
entry.insert(Pool::HostVisible(pool.clone()));
|
||||
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
|
||||
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
|
||||
Ok(StdMemoryPoolAlloc {
|
||||
inner: inner,
|
||||
pool: self.clone(),
|
||||
})
|
||||
inner: inner,
|
||||
pool: self.clone(),
|
||||
})
|
||||
} else {
|
||||
let pool = StdNonHostVisibleMemoryTypePool::new(self.device.clone(),
|
||||
memory_type);
|
||||
@ -102,9 +102,9 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
|
||||
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
|
||||
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
|
||||
Ok(StdMemoryPoolAlloc {
|
||||
inner: inner,
|
||||
pool: self.clone(),
|
||||
})
|
||||
inner: inner,
|
||||
pool: self.clone(),
|
||||
})
|
||||
}
|
||||
},
|
||||
}
|
||||
|
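A brief hedged sketch of instantiating the pool defined above; that `StdMemoryPool::new` returns an `Arc` follows from the `impl MemoryPool for Arc<StdMemoryPool>` shown in this hunk, but the exact constructor signature is an assumption.

```rust
// Hedged sketch: creating the standard pool. One sub-pool is kept per
// (memory type id, AllocLayout, MappingRequirement) key, per the HashMap above.
use std::sync::Arc;
use vulkano::device::Device;
use vulkano::memory::pool::StdMemoryPool;

fn make_pool(device: Arc<Device>) -> Arc<StdMemoryPool> {
    StdMemoryPool::new(device)
}
```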
@ -81,7 +81,7 @@ pub struct AttachmentBlend {
|
||||
|
||||
pub color_op: BlendOp,
|
||||
pub color_source: BlendFactor,
|
||||
pub color_destination: BlendFactor,
|
||||
pub color_destination: BlendFactor,
|
||||
|
||||
pub alpha_op: BlendOp,
|
||||
pub alpha_source: BlendFactor,
|
||||
|
@ -244,7 +244,7 @@ mod tests {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let pipeline = PipelineCache::empty(device).unwrap();
|
||||
assert_should_panic!({
|
||||
pipeline.merge(&[&pipeline]).unwrap();
|
||||
});
|
||||
pipeline.merge(&[&pipeline]).unwrap();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -76,10 +76,10 @@ impl<Pl> ComputePipeline<Pl> {
|
||||
///
|
||||
/// An error will be returned if the pipeline layout isn't a superset of what the shader
|
||||
/// uses.
|
||||
pub fn with_pipeline_layout<Cs>(
|
||||
device: Arc<Device>, shader: &Cs, specialization: &Cs::SpecializationConstants,
|
||||
pipeline_layout: Pl)
|
||||
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
|
||||
pub fn with_pipeline_layout<Cs>(device: Arc<Device>, shader: &Cs,
|
||||
specialization: &Cs::SpecializationConstants,
|
||||
pipeline_layout: Pl)
|
||||
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
|
||||
where Cs::PipelineLayout: Clone,
|
||||
Cs: EntryPointAbstract,
|
||||
Pl: PipelineLayoutAbstract
|
||||
@ -366,24 +366,24 @@ impl From<Error> for ComputePipelineCreationError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ffi::CStr;
|
||||
use std::sync::Arc;
|
||||
use buffer::BufferUsage;
|
||||
use buffer::CpuAccessibleBuffer;
|
||||
use command_buffer::AutoCommandBufferBuilder;
|
||||
use descriptor::descriptor::DescriptorBufferDesc;
|
||||
use descriptor::descriptor::DescriptorDesc;
|
||||
use descriptor::descriptor::DescriptorDescTy;
|
||||
use descriptor::descriptor::DescriptorBufferDesc;
|
||||
use descriptor::descriptor::ShaderStages;
|
||||
use descriptor::descriptor_set::PersistentDescriptorSet;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDesc;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
|
||||
use pipeline::ComputePipeline;
|
||||
use pipeline::shader::ShaderModule;
|
||||
use pipeline::shader::SpecializationConstants;
|
||||
use pipeline::shader::SpecializationMapEntry;
|
||||
use pipeline::ComputePipeline;
|
||||
use sync::now;
|
||||
use std::ffi::CStr;
|
||||
use std::sync::Arc;
|
||||
use sync::GpuFuture;
|
||||
use sync::now;
|
||||
|
||||
// TODO: test for basic creation
|
||||
// TODO: test for pipeline layout error
|
||||
@ -412,33 +412,488 @@ mod tests {
|
||||
write.write = VALUE;
|
||||
}
|
||||
*/
|
||||
const MODULE: [u8; 480] = [3, 2, 35, 7, 0, 0, 1, 0, 1, 0, 8, 0, 14, 0, 0, 0, 0, 0, 0,
|
||||
0, 17, 0, 2, 0, 1, 0, 0, 0, 11, 0, 6, 0, 1, 0, 0, 0, 71, 76,
|
||||
83, 76, 46, 115, 116, 100, 46, 52, 53, 48, 0, 0, 0, 0, 14,
|
||||
0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 15, 0, 5, 0, 5, 0, 0, 0, 4,
|
||||
0, 0, 0, 109, 97, 105, 110, 0, 0, 0, 0, 16, 0, 6, 0, 4, 0,
|
||||
0, 0, 17, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0,
|
||||
3, 0, 2, 0, 0, 0, 194, 1, 0, 0, 5, 0, 4, 0, 4, 0, 0, 0, 109,
|
||||
97, 105, 110, 0, 0, 0, 0, 5, 0, 4, 0, 7, 0, 0, 0, 79, 117,
|
||||
116, 112, 117, 116, 0, 0, 6, 0, 5, 0, 7, 0, 0, 0, 0, 0, 0,
|
||||
0, 119, 114, 105, 116, 101, 0, 0, 0, 5, 0, 4, 0, 9, 0, 0,
|
||||
0, 119, 114, 105, 116, 101, 0, 0, 0, 5, 0, 4, 0, 11, 0, 0,
|
||||
0, 86, 65, 76, 85, 69, 0, 0, 0, 72, 0, 5, 0, 7, 0, 0, 0, 0,
|
||||
0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 0, 71, 0, 3, 0, 7, 0, 0, 0,
|
||||
3, 0, 0, 0, 71, 0, 4, 0, 9, 0, 0, 0, 34, 0, 0, 0, 0, 0, 0,
|
||||
0, 71, 0, 4, 0, 9, 0, 0, 0, 33, 0, 0, 0, 0, 0, 0, 0, 71, 0,
|
||||
4, 0, 11, 0, 0, 0, 1, 0, 0, 0, 83, 0, 0, 0, 19, 0, 2, 0, 2,
|
||||
0, 0, 0, 33, 0, 3, 0, 3, 0, 0, 0, 2, 0, 0, 0, 21, 0, 4, 0,
|
||||
6, 0, 0, 0, 32, 0, 0, 0, 1, 0, 0, 0, 30, 0, 3, 0, 7, 0, 0,
|
||||
0, 6, 0, 0, 0, 32, 0, 4, 0, 8, 0, 0, 0, 2, 0, 0, 0, 7, 0, 0,
|
||||
0, 59, 0, 4, 0, 8, 0, 0, 0, 9, 0, 0, 0, 2, 0, 0, 0, 43, 0,
|
||||
4, 0, 6, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 50, 0, 4, 0, 6,
|
||||
0, 0, 0, 11, 0, 0, 0, 239, 190, 173, 222, 32, 0, 4, 0, 12,
|
||||
0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0, 54, 0, 5, 0, 2, 0, 0, 0, 4,
|
||||
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 248, 0, 2, 0, 5, 0, 0, 0,
|
||||
65, 0, 5, 0, 12, 0, 0, 0, 13, 0, 0, 0, 9, 0, 0, 0, 10, 0, 0,
|
||||
0, 62, 0, 3, 0, 13, 0, 0, 0, 11, 0, 0, 0, 253, 0, 1, 0, 56,
|
||||
0, 1, 0];
|
||||
const MODULE: [u8; 480] = [
|
||||
3,
|
||||
2,
|
||||
35,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
8,
|
||||
0,
|
||||
14,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
17,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
11,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
71,
|
||||
76,
|
||||
83,
|
||||
76,
|
||||
46,
|
||||
115,
|
||||
116,
|
||||
100,
|
||||
46,
|
||||
52,
|
||||
53,
|
||||
48,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
14,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
15,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
109,
|
||||
97,
|
||||
105,
|
||||
110,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
16,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
17,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
194,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
109,
|
||||
97,
|
||||
105,
|
||||
110,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
79,
|
||||
117,
|
||||
116,
|
||||
112,
|
||||
117,
|
||||
116,
|
||||
0,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
119,
|
||||
114,
|
||||
105,
|
||||
116,
|
||||
101,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
119,
|
||||
114,
|
||||
105,
|
||||
116,
|
||||
101,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
11,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
86,
|
||||
65,
|
||||
76,
|
||||
85,
|
||||
69,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
72,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
35,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
71,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
71,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
34,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
71,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
33,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
71,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
11,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
83,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
19,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
33,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
21,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
32,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
30,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
32,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
8,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
7,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
59,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
8,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
43,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
10,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
50,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
11,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
239,
|
||||
190,
|
||||
173,
|
||||
222,
|
||||
32,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
12,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
6,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
54,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
4,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
248,
|
||||
0,
|
||||
2,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
65,
|
||||
0,
|
||||
5,
|
||||
0,
|
||||
12,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
13,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
10,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
62,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
13,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
11,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
253,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
56,
|
||||
0,
|
||||
1,
|
||||
0,
|
||||
];
|
||||
ShaderModule::new(device.clone(), &MODULE).unwrap()
|
||||
};
|
||||
|
||||
@ -446,41 +901,50 @@ mod tests {
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
struct Layout;
|
||||
unsafe impl PipelineLayoutDesc for Layout {
|
||||
fn num_sets(&self) -> usize { 1 }
|
||||
fn num_sets(&self) -> usize {
|
||||
1
|
||||
}
|
||||
fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
|
||||
match set {
|
||||
0 => Some(1),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
|
||||
match (set, binding) {
|
||||
(0, 0) => Some(DescriptorDesc {
|
||||
ty: DescriptorDescTy::Buffer(DescriptorBufferDesc {
|
||||
dynamic: Some(false),
|
||||
storage: true,
|
||||
}),
|
||||
array_count: 1,
|
||||
stages: ShaderStages { compute: true, .. ShaderStages::none() },
|
||||
readonly: true,
|
||||
}),
|
||||
_ => None
|
||||
ty: DescriptorDescTy::Buffer(DescriptorBufferDesc {
|
||||
dynamic: Some(false),
|
||||
storage: true,
|
||||
}),
|
||||
array_count: 1,
|
||||
stages: ShaderStages {
|
||||
compute: true,
|
||||
..ShaderStages::none()
|
||||
},
|
||||
readonly: true,
|
||||
}),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
fn num_push_constants_ranges(&self) -> usize { 0 }
|
||||
fn num_push_constants_ranges(&self) -> usize {
|
||||
0
|
||||
}
|
||||
fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
static NAME: [u8; 5] = [109, 97, 105, 110, 0]; // "main"
|
||||
static NAME: [u8; 5] = [109, 97, 105, 110, 0]; // "main"
|
||||
module.compute_entry_point(CStr::from_ptr(NAME.as_ptr() as *const _), Layout)
|
||||
};
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
#[allow(non_snake_case)]
|
||||
#[repr(C)]
|
||||
struct SpecConsts { VALUE: i32 }
|
||||
struct SpecConsts {
|
||||
VALUE: i32,
|
||||
}
|
||||
unsafe impl SpecializationConstants for SpecConsts {
|
||||
fn descriptors() -> &'static [SpecializationMapEntry] {
|
||||
static DESCRIPTORS: [SpecializationMapEntry; 1] = [
|
||||
@ -488,29 +952,38 @@ mod tests {
|
||||
constant_id: 83,
|
||||
offset: 0,
|
||||
size: 4,
|
||||
}
|
||||
},
|
||||
];
|
||||
&DESCRIPTORS
|
||||
}
|
||||
}
|
||||
|
||||
let pipeline = Arc::new(ComputePipeline::new(device.clone(), &shader,
|
||||
&SpecConsts { VALUE: 0x12345678 }).unwrap());
|
||||
let pipeline = Arc::new(ComputePipeline::new(device.clone(),
|
||||
&shader,
|
||||
&SpecConsts { VALUE: 0x12345678 })
|
||||
.unwrap());
|
||||
|
||||
let data_buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
|
||||
0).unwrap();
|
||||
let data_buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 0)
|
||||
.unwrap();
|
||||
let set = PersistentDescriptorSet::start(pipeline.clone(), 0)
|
||||
.add_buffer(data_buffer.clone()).unwrap()
|
||||
.build().unwrap();
|
||||
.add_buffer(data_buffer.clone())
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(device.clone(),
|
||||
queue.family()).unwrap()
|
||||
.dispatch([1, 1, 1], pipeline, set, ()).unwrap()
|
||||
.build().unwrap();
|
||||
queue.family())
|
||||
.unwrap()
|
||||
.dispatch([1, 1, 1], pipeline, set, ())
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let future = now(device.clone())
|
||||
.then_execute(queue.clone(), command_buffer).unwrap()
|
||||
.then_signal_fence_and_flush().unwrap();
|
||||
.then_execute(queue.clone(), command_buffer)
|
||||
.unwrap()
|
||||
.then_signal_fence_and_flush()
|
||||
.unwrap();
|
||||
future.wait(None).unwrap();
|
||||
|
||||
let data_buffer_content = data_buffer.read().unwrap();
|
||||
|
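The specialization-constant plumbing used in this test generalizes as follows; a hedged sketch, with the second field and its `constant_id` of 84 being purely hypothetical.

```rust
// Hedged sketch: mapping a #[repr(C)] struct onto SPIR-V specialization
// constants, following the SpecConsts pattern in the test above. The second
// field and SpecId 84 are hypothetical.
use vulkano::pipeline::shader::{SpecializationConstants, SpecializationMapEntry};

#[derive(Debug, Copy, Clone)]
#[repr(C)]
struct MySpecConsts {
    value: i32,   // backs the spec constant decorated with SpecId 83
    enabled: u32, // hypothetical second constant, SpecId 84
}

unsafe impl SpecializationConstants for MySpecConsts {
    fn descriptors() -> &'static [SpecializationMapEntry] {
        // Each entry ties a byte range of the struct to one SpecId.
        static DESCRIPTORS: [SpecializationMapEntry; 2] = [
            SpecializationMapEntry { constant_id: 83, offset: 0, size: 4 },
            SpecializationMapEntry { constant_id: 84, offset: 4, size: 4 },
        ];
        &DESCRIPTORS
    }
}
```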
@ -84,11 +84,16 @@ impl GraphicsPipeline<(), (), ()> {
|
||||
/// fill with the various parameters.
|
||||
pub fn start<'a>()
|
||||
-> GraphicsPipelineBuilder<SingleBufferDefinition<()>,
|
||||
EmptyEntryPointDummy, (),
|
||||
EmptyEntryPointDummy, (),
|
||||
EmptyEntryPointDummy, (),
|
||||
EmptyEntryPointDummy, (),
|
||||
EmptyEntryPointDummy, (),
|
||||
EmptyEntryPointDummy,
|
||||
(),
|
||||
EmptyEntryPointDummy,
|
||||
(),
|
||||
EmptyEntryPointDummy,
|
||||
(),
|
||||
EmptyEntryPointDummy,
|
||||
(),
|
||||
EmptyEntryPointDummy,
|
||||
(),
|
||||
()>
|
||||
{
|
||||
GraphicsPipelineBuilder::new()
|
||||
@ -316,41 +321,41 @@ impl Drop for Inner {
|
||||
/// Trait implemented on objects that reference a graphics pipeline. Can be made into a trait
|
||||
/// object.
|
||||
pub unsafe trait GraphicsPipelineAbstract: PipelineLayoutAbstract + RenderPassAbstract + VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> {
|
||||
/// Returns an opaque object that represents the inside of the graphics pipeline.
|
||||
/// Returns an opaque object that represents the inside of the graphics pipeline.
|
||||
fn inner(&self) -> GraphicsPipelineSys;
|
||||
|
||||
/// Returns the index of the subpass this graphics pipeline is rendering to.
|
||||
/// Returns the index of the subpass this graphics pipeline is rendering to.
|
||||
fn subpass_index(&self) -> u32;
|
||||
|
||||
/// Returns the subpass this graphics pipeline is rendering to.
|
||||
/// Returns the subpass this graphics pipeline is rendering to.
|
||||
#[inline]
|
||||
fn subpass(self) -> Subpass<Self> where Self: Sized {
|
||||
let index = self.subpass_index();
|
||||
Subpass::from(self, index).expect("Wrong subpass index in GraphicsPipelineAbstract::subpass")
|
||||
}
|
||||
|
||||
/// Returns true if the line width used by this pipeline is dynamic.
|
||||
/// Returns true if the line width used by this pipeline is dynamic.
|
||||
fn has_dynamic_line_width(&self) -> bool;
|
||||
|
||||
/// Returns the number of viewports and scissors of this pipeline.
|
||||
/// Returns the number of viewports and scissors of this pipeline.
|
||||
fn num_viewports(&self) -> u32;
|
||||
|
||||
/// Returns true if the viewports used by this pipeline are dynamic.
|
||||
/// Returns true if the viewports used by this pipeline are dynamic.
|
||||
fn has_dynamic_viewports(&self) -> bool;
|
||||
|
||||
/// Returns true if the scissors used by this pipeline are dynamic.
|
||||
/// Returns true if the scissors used by this pipeline are dynamic.
|
||||
fn has_dynamic_scissors(&self) -> bool;
|
||||
|
||||
/// Returns true if the depth bounds used by this pipeline are dynamic.
|
||||
/// Returns true if the depth bounds used by this pipeline are dynamic.
|
||||
fn has_dynamic_depth_bounds(&self) -> bool;
|
||||
|
||||
/// Returns true if the stencil compare masks used by this pipeline are dynamic.
|
||||
/// Returns true if the stencil compare masks used by this pipeline are dynamic.
|
||||
fn has_dynamic_stencil_compare_mask(&self) -> bool;
|
||||
|
||||
/// Returns true if the stencil write masks used by this pipeline are dynamic.
|
||||
/// Returns true if the stencil write masks used by this pipeline are dynamic.
|
||||
fn has_dynamic_stencil_write_mask(&self) -> bool;
|
||||
|
||||
/// Returns true if the stencil references used by this pipeline are dynamic.
|
||||
/// Returns true if the stencil references used by this pipeline are dynamic.
|
||||
fn has_dynamic_stencil_reference(&self) -> bool;
|
||||
}
|
||||
|
||||
|
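Since `GraphicsPipelineAbstract` is meant to be used as a trait object, here is a hedged sketch of querying a type-erased pipeline; the `vulkano::pipeline` re-export path is an assumption, the methods are the ones listed above.

```rust
// Hedged sketch: inspecting a pipeline through the trait object form.
use std::sync::Arc;
use vulkano::pipeline::GraphicsPipelineAbstract;

fn describe(pipeline: &Arc<GraphicsPipelineAbstract + Send + Sync>) {
    println!("renders to subpass {}", pipeline.subpass_index());
    println!("{} viewport(s), dynamic: {}",
             pipeline.num_viewports(),
             pipeline.has_dynamic_viewports());
    println!("dynamic line width: {}", pipeline.has_dynamic_line_width());
}
```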
@ -100,10 +100,10 @@ impl ShaderModule {
|
||||
/// - The input, output and layout must correctly describe the input, output and layout used
|
||||
/// by this stage.
|
||||
///
|
||||
pub unsafe fn graphics_entry_point<'a, S, I, O, L>(&'a self, name: &'a CStr, input: I, output: O,
|
||||
layout: L, ty: GraphicsShaderType)
|
||||
-> GraphicsEntryPoint<'a, S, I, O, L>
|
||||
{
|
||||
pub unsafe fn graphics_entry_point<'a, S, I, O, L>(&'a self, name: &'a CStr, input: I,
|
||||
output: O, layout: L,
|
||||
ty: GraphicsShaderType)
|
||||
-> GraphicsEntryPoint<'a, S, I, O, L> {
|
||||
GraphicsEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -189,7 +189,7 @@ unsafe impl<'a, S, I, O, L> EntryPointAbstract for GraphicsEntryPoint<'a, S, I,
|
||||
where L: PipelineLayoutDesc,
|
||||
I: ShaderInterfaceDef,
|
||||
O: ShaderInterfaceDef,
|
||||
S: SpecializationConstants,
|
||||
S: SpecializationConstants
|
||||
{
|
||||
type PipelineLayout = L;
|
||||
type SpecializationConstants = S;
|
||||
@ -214,7 +214,7 @@ unsafe impl<'a, S, I, O, L> GraphicsEntryPointAbstract for GraphicsEntryPoint<'a
|
||||
where L: PipelineLayoutDesc,
|
||||
I: ShaderInterfaceDef,
|
||||
O: ShaderInterfaceDef,
|
||||
S: SpecializationConstants,
|
||||
S: SpecializationConstants
|
||||
{
|
||||
type InputDefinition = I;
|
||||
type OutputDefinition = O;
|
||||
@ -305,7 +305,7 @@ pub struct ComputeEntryPoint<'a, S, L> {
|
||||
|
||||
unsafe impl<'a, S, L> EntryPointAbstract for ComputeEntryPoint<'a, S, L>
|
||||
where L: PipelineLayoutDesc,
|
||||
S: SpecializationConstants,
|
||||
S: SpecializationConstants
|
||||
{
|
||||
type PipelineLayout = L;
|
||||
type SpecializationConstants = S;
|
||||
|
@ -102,8 +102,10 @@ unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for TwoBuff
|
||||
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
// FIXME: safety
|
||||
assert_eq!(source.len(), 2);
|
||||
let vertices = [source[0].size() / mem::size_of::<T>(), source[1].size() / mem::size_of::<U>()]
|
||||
.iter()
|
||||
let vertices = [
|
||||
source[0].size() / mem::size_of::<T>(),
|
||||
source[1].size() / mem::size_of::<U>(),
|
||||
].iter()
|
||||
.cloned()
|
||||
.min()
|
||||
.unwrap();
|
||||
|
@ -85,10 +85,7 @@ impl UnsafeQueryPool {
|
||||
#[inline]
|
||||
pub fn query(&self, index: u32) -> Option<UnsafeQuery> {
|
||||
if index < self.num_slots() {
|
||||
Some(UnsafeQuery {
|
||||
pool: self,
|
||||
index,
|
||||
})
|
||||
Some(UnsafeQuery { pool: self, index })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -104,10 +101,10 @@ impl UnsafeQueryPool {
|
||||
|
||||
if first_index + count < self.num_slots() {
|
||||
Some(UnsafeQueriesRange {
|
||||
pool: self,
|
||||
first: first_index,
|
||||
count,
|
||||
})
|
||||
pool: self,
|
||||
first: first_index,
|
||||
count,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
@ -809,18 +809,18 @@ mod tests {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
assert_should_panic!({
|
||||
let _ = sampler::Sampler::new(device,
|
||||
sampler::Filter::Linear,
|
||||
sampler::Filter::Linear,
|
||||
sampler::MipmapMode::Nearest,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
1.0,
|
||||
1.0,
|
||||
5.0,
|
||||
2.0);
|
||||
});
|
||||
let _ = sampler::Sampler::new(device,
|
||||
sampler::Filter::Linear,
|
||||
sampler::Filter::Linear,
|
||||
sampler::MipmapMode::Nearest,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
1.0,
|
||||
1.0,
|
||||
5.0,
|
||||
2.0);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -828,18 +828,18 @@ mod tests {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
assert_should_panic!({
|
||||
let _ = sampler::Sampler::new(device,
|
||||
sampler::Filter::Linear,
|
||||
sampler::Filter::Linear,
|
||||
sampler::MipmapMode::Nearest,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
1.0,
|
||||
0.5,
|
||||
0.0,
|
||||
2.0);
|
||||
});
|
||||
let _ = sampler::Sampler::new(device,
|
||||
sampler::Filter::Linear,
|
||||
sampler::Filter::Linear,
|
||||
sampler::MipmapMode::Nearest,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
1.0,
|
||||
0.5,
|
||||
0.0,
|
||||
2.0);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -851,16 +851,16 @@ mod tests {
|
||||
|
||||
assert_should_panic!({
|
||||
let _ = sampler::Sampler::new(device,
|
||||
sampler::Filter::Linear,
|
||||
sampler::Filter::Linear,
|
||||
sampler::MipmapMode::Nearest,
|
||||
sampler::SamplerAddressMode::ClampToBorder(b1),
|
||||
sampler::SamplerAddressMode::ClampToBorder(b2),
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
1.0,
|
||||
1.0,
|
||||
5.0,
|
||||
2.0);
|
||||
sampler::Filter::Linear,
|
||||
sampler::Filter::Linear,
|
||||
sampler::MipmapMode::Nearest,
|
||||
sampler::SamplerAddressMode::ClampToBorder(b1),
|
||||
sampler::SamplerAddressMode::ClampToBorder(b2),
|
||||
sampler::SamplerAddressMode::Repeat,
|
||||
1.0,
|
||||
1.0,
|
||||
5.0,
|
||||
2.0);
|
||||
});
|
||||
}
|
||||
|
||||
|
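For contrast with the panicking cases exercised by these tests, a hedged sketch of a `Sampler::new` call that satisfies the checked invariants (max_anisotropy of at least 1.0, min_lod not greater than max_lod); the argument order is taken from the calls above.

```rust
// Hedged sketch: a sampler whose trailing parameters (mip_lod_bias,
// max_anisotropy, min_lod, max_lod) respect the invariants the tests above
// deliberately violate.
use std::sync::Arc;
use vulkano::device::Device;
use vulkano::sampler::{Filter, MipmapMode, Sampler, SamplerAddressMode};

fn make_sampler(device: Arc<Device>) -> Arc<Sampler> {
    Sampler::new(device,
                 Filter::Linear,
                 Filter::Linear,
                 MipmapMode::Nearest,
                 SamplerAddressMode::Repeat,
                 SamplerAddressMode::Repeat,
                 SamplerAddressMode::Repeat,
                 0.0, // mip_lod_bias
                 1.0, // max_anisotropy, must be >= 1.0
                 0.0, // min_lod
                 1.0) // max_lod, must be >= min_lod
        .unwrap()
}
```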
@ -296,8 +296,8 @@ pub use self::present_region::RectangleLayer;
|
||||
pub use self::surface::CapabilitiesError;
|
||||
pub use self::surface::Surface;
|
||||
pub use self::surface::SurfaceCreationError;
|
||||
pub use self::swapchain::AcquiredImage;
|
||||
pub use self::swapchain::AcquireError;
|
||||
pub use self::swapchain::AcquiredImage;
|
||||
pub use self::swapchain::PresentFuture;
|
||||
pub use self::swapchain::Swapchain;
|
||||
pub use self::swapchain::SwapchainAcquireFuture;
|
||||
|
@ -6,6 +6,7 @@
|
||||
// at your option. All files in the project carrying such
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use swapchain::Swapchain;
|
||||
use vk;
|
||||
|
||||
@ -20,7 +21,9 @@ pub struct PresentRegion {
|
||||
impl PresentRegion {
|
||||
/// Returns true if this present region is compatible with swapchain.
|
||||
pub fn is_compatible_with(&self, swapchain: &Swapchain) -> bool {
|
||||
self.rectangles.iter().all(|rect| rect.is_compatible_with(swapchain))
|
||||
self.rectangles
|
||||
.iter()
|
||||
.all(|rect| rect.is_compatible_with(swapchain))
|
||||
}
|
||||
}
|
||||
|
||||
@ -44,8 +47,8 @@ impl RectangleLayer {
|
||||
debug_assert!(self.offset[0] >= 0);
|
||||
debug_assert!(self.offset[1] >= 0);
|
||||
self.offset[0] as u32 + self.extent[0] <= swapchain.dimensions()[0] &&
|
||||
self.offset[1] as u32 + self.extent[1] <= swapchain.dimensions()[1] &&
|
||||
self.layer < swapchain.layers()
|
||||
self.offset[1] as u32 + self.extent[1] <= swapchain.dimensions()[1] &&
|
||||
self.layer < swapchain.layers()
|
||||
}
|
||||
|
||||
pub(crate) fn to_vk(&self) -> vk::RectLayerKHR {
|
||||
|
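To make the compatibility rule above concrete, a hedged sketch of building a present region that fits inside a 1920x1080, single-layer swapchain; the field names come from the expressions in this file, their exact types are inferred.

```rust
// Hedged sketch: a PresentRegion covering the top-left quarter of a
// 1920x1080 swapchain. is_compatible_with() checks offset + extent against
// the swapchain dimensions and `layer` against swapchain.layers().
use vulkano::swapchain::{PresentRegion, RectangleLayer};

fn quarter_region() -> PresentRegion {
    let rect = RectangleLayer {
        offset: [0, 0],
        extent: [960, 540],
        layer: 0,
    };
    PresentRegion { rectangles: vec![rect] }
}
```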
@ -131,9 +131,9 @@ pub fn present<F>(swapchain: Arc<Swapchain>, before: F, queue: Arc<Queue>, index
|
||||
/// This is just an optimization hint, as the vulkan driver is free to ignore the given present region.
|
||||
///
|
||||
/// If `VK_KHR_incremental_present` is not enabled on the device, the parameter will be ignored.
|
||||
pub fn present_incremental<F>(swapchain: Arc<Swapchain>, before: F, queue: Arc<Queue>, index: usize,
|
||||
present_region: PresentRegion)
|
||||
-> PresentFuture<F>
|
||||
pub fn present_incremental<F>(swapchain: Arc<Swapchain>, before: F, queue: Arc<Queue>,
|
||||
index: usize, present_region: PresentRegion)
|
||||
-> PresentFuture<F>
|
||||
where F: GpuFuture
|
||||
{
|
||||
assert!(index < swapchain.images.len());
|
||||
@ -809,14 +809,14 @@ unsafe impl GpuFuture for SwapchainAcquireFuture {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_buffer_access(&self, _: &BufferAccess, _: bool, _: &Queue)
|
||||
fn check_buffer_access(
|
||||
&self, _: &BufferAccess, _: bool, _: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
Err(AccessCheckError::Unknown)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, _: bool,
|
||||
_: &Queue)
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, _: bool, _: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
let swapchain_image = self.swapchain.raw_image(self.image_id).unwrap();
|
||||
if swapchain_image.image.internal_object() != image.inner().image.internal_object() {
|
||||
@ -854,7 +854,7 @@ impl Drop for SwapchainAcquireFuture {
|
||||
fn drop(&mut self) {
|
||||
if !*self.finished.get_mut() {
|
||||
if let Some(ref fence) = self.fence {
|
||||
fence.wait(None).unwrap(); // TODO: handle error?
|
||||
fence.wait(None).unwrap(); // TODO: handle error?
|
||||
self.semaphore = None;
|
||||
}
|
||||
|
||||
@ -863,9 +863,12 @@ impl Drop for SwapchainAcquireFuture {
|
||||
// validation layers about using a fence whose state hasn't been checked (even though
|
||||
// we know for sure that it must've been signalled).
|
||||
debug_assert!({
|
||||
let dur = Some(Duration::new(0, 0));
|
||||
self.fence.as_ref().map(|f| f.wait(dur).is_ok()).unwrap_or(true)
|
||||
});
|
||||
let dur = Some(Duration::new(0, 0));
|
||||
self.fence
|
||||
.as_ref()
|
||||
.map(|f| f.wait(dur).is_ok())
|
||||
.unwrap_or(true)
|
||||
});
|
||||
}
|
||||
|
||||
// TODO: if this future is destroyed without being presented, then eventually acquiring
|
||||
@ -999,24 +1002,32 @@ unsafe impl<P> GpuFuture for PresentFuture<P>
|
||||
Ok(match self.previous.build_submission()? {
|
||||
SubmitAnyBuilder::Empty => {
|
||||
let mut builder = SubmitPresentBuilder::new();
|
||||
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
|
||||
builder.add_swapchain(&self.swapchain,
|
||||
self.image_id as u32,
|
||||
self.present_region.as_ref());
|
||||
SubmitAnyBuilder::QueuePresent(builder)
|
||||
},
|
||||
SubmitAnyBuilder::SemaphoresWait(sem) => {
|
||||
let mut builder: SubmitPresentBuilder = sem.into();
|
||||
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
|
||||
builder.add_swapchain(&self.swapchain,
|
||||
self.image_id as u32,
|
||||
self.present_region.as_ref());
|
||||
SubmitAnyBuilder::QueuePresent(builder)
|
||||
},
|
||||
SubmitAnyBuilder::CommandBuffer(cb) => {
|
||||
cb.submit(&queue.unwrap())?; // FIXME: wrong because build_submission can be called multiple times
|
||||
let mut builder = SubmitPresentBuilder::new();
|
||||
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
|
||||
builder.add_swapchain(&self.swapchain,
|
||||
self.image_id as u32,
|
||||
self.present_region.as_ref());
|
||||
SubmitAnyBuilder::QueuePresent(builder)
|
||||
},
|
||||
SubmitAnyBuilder::BindSparse(cb) => {
|
||||
cb.submit(&queue.unwrap())?; // FIXME: wrong because build_submission can be called multiple times
|
||||
let mut builder = SubmitPresentBuilder::new();
|
||||
builder.add_swapchain(&self.swapchain, self.image_id as u32, self.present_region.as_ref());
|
||||
builder.add_swapchain(&self.swapchain,
|
||||
self.image_id as u32,
|
||||
self.present_region.as_ref());
|
||||
SubmitAnyBuilder::QueuePresent(builder)
|
||||
},
|
||||
SubmitAnyBuilder::QueuePresent(present) => {
|
||||
@ -1139,8 +1150,7 @@ pub struct AcquiredImage {
|
||||
/// a new one.
|
||||
pub unsafe fn acquire_next_image_raw(swapchain: &Swapchain, timeout: Option<Duration>,
|
||||
semaphore: Option<&Semaphore>, fence: Option<&Fence>)
|
||||
-> Result<AcquiredImage, AcquireError>
|
||||
{
|
||||
-> Result<AcquiredImage, AcquireError> {
|
||||
let vk = swapchain.device.pointers();
|
||||
|
||||
let timeout_ns = if let Some(timeout) = timeout {
|
||||
@ -1153,12 +1163,13 @@ pub unsafe fn acquire_next_image_raw(swapchain: &Swapchain, timeout: Option<Dura
|
||||
};
|
||||
|
||||
let mut out = mem::uninitialized();
|
||||
let r = check_errors(vk.AcquireNextImageKHR(swapchain.device.internal_object(),
|
||||
swapchain.swapchain,
|
||||
timeout_ns,
|
||||
semaphore.map(|s| s.internal_object()).unwrap_or(0),
|
||||
fence.map(|f| f.internal_object()).unwrap_or(0),
|
||||
&mut out))?;
|
||||
let r =
|
||||
check_errors(vk.AcquireNextImageKHR(swapchain.device.internal_object(),
|
||||
swapchain.swapchain,
|
||||
timeout_ns,
|
||||
semaphore.map(|s| s.internal_object()).unwrap_or(0),
|
||||
fence.map(|f| f.internal_object()).unwrap_or(0),
|
||||
&mut out))?;
|
||||
|
||||
let (id, suboptimal) = match r {
|
||||
Success::Success => (out as usize, false),
|
||||
|
@ -35,7 +35,6 @@ pub struct Event {
|
||||
}
|
||||
|
||||
impl Event {
|
||||
|
||||
/// Takes an event from the vulkano-provided event pool.
|
||||
/// If the pool is empty, a new event will be allocated.
|
||||
/// Upon `drop`, the event is put back into the pool.
|
||||
@ -52,10 +51,10 @@ impl Event {
|
||||
check_errors(vk.ResetEvent(device.internal_object(), raw_event))?;
|
||||
}
|
||||
Ok(Event {
|
||||
event: raw_event,
|
||||
device: device,
|
||||
must_put_in_pool: true,
|
||||
})
|
||||
event: raw_event,
|
||||
device: device,
|
||||
must_put_in_pool: true,
|
||||
})
|
||||
},
|
||||
None => {
|
||||
// Pool is empty, alloc new event
|
||||
@ -188,8 +187,8 @@ impl Drop for Event {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use sync::Event;
|
||||
use VulkanObject;
|
||||
use sync::Event;
|
||||
|
||||
#[test]
|
||||
fn event_create() {
|
||||
|
@ -69,11 +69,11 @@ impl<D> Fence<D>
|
||||
check_errors(vk.ResetFences(device.internal_object(), 1, &raw_fence))?;
|
||||
}
|
||||
Ok(Fence {
|
||||
fence: raw_fence,
|
||||
device: device,
|
||||
signaled: AtomicBool::new(false),
|
||||
must_put_in_pool: true,
|
||||
})
|
||||
fence: raw_fence,
|
||||
device: device,
|
||||
signaled: AtomicBool::new(false),
|
||||
must_put_in_pool: true,
|
||||
})
|
||||
},
|
||||
None => {
|
||||
// Pool is empty, alloc new fence
|
||||
@ -249,10 +249,10 @@ impl<D> Fence<D>
|
||||
#[inline]
|
||||
pub fn reset(&mut self) -> Result<(), OomError> {
|
||||
unsafe {
|
||||
let vk = self.device.pointers();
|
||||
check_errors(vk.ResetFences(self.device.internal_object(), 1, &self.fence))?;
|
||||
self.signaled.store(false, Ordering::Relaxed);
|
||||
Ok(())
|
||||
let vk = self.device.pointers();
|
||||
check_errors(vk.ResetFences(self.device.internal_object(), 1, &self.fence))?;
|
||||
self.signaled.store(false, Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -287,8 +287,8 @@ impl<D> Fence<D>
|
||||
unsafe {
|
||||
let vk = device.pointers();
|
||||
check_errors(vk.ResetFences(device.internal_object(),
|
||||
fences.len() as u32,
|
||||
fences.as_ptr()))?;
|
||||
fences.len() as u32,
|
||||
fences.as_ptr()))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@ -383,9 +383,9 @@ impl From<Error> for FenceWaitError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use VulkanObject;
|
||||
use std::time::Duration;
|
||||
use sync::Fence;
|
||||
use VulkanObject;
|
||||
|
||||
#[test]
|
||||
fn fence_create() {
|
||||
|
@ -249,8 +249,8 @@ pub unsafe trait GpuFuture: DeviceOwned {
|
||||
/// > **Note**: This is just a shortcut for the `Swapchain::present_incremental()` function.
|
||||
#[inline]
|
||||
fn then_swapchain_present_incremental(self, queue: Arc<Queue>, swapchain: Arc<Swapchain>,
|
||||
image_index: usize, present_region: PresentRegion)
|
||||
-> PresentFuture<Self>
|
||||
image_index: usize, present_region: PresentRegion)
|
||||
-> PresentFuture<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
swapchain::present_incremental(swapchain, self, queue, image_index, present_region)
|
||||
|
@ -127,8 +127,8 @@ impl<D> Drop for Semaphore<D>
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use sync::Semaphore;
|
||||
use VulkanObject;
|
||||
use sync::Semaphore;
|
||||
|
||||
#[test]
|
||||
fn semaphore_create() {
|
||||
|