Run rustfmt on the code

This commit is contained in:
Pierre Krieger 2017-06-27 08:42:26 +02:00
parent d7c9d08f76
commit ef466eac85
114 changed files with 9010 additions and 5582 deletions

18
.rustfmt.toml Normal file
View File

@ -0,0 +1,18 @@
fn_args_density = "Compressed"
fn_args_layout = "Visual"
fn_brace_style = "SameLineWhere"
fn_call_style = "Visual"
fn_empty_single_line = false
format_strings = true
generics_indent = "Visual"
impl_empty_single_line = false
match_block_trailing_comma = true
reorder_imported_names = true
reorder_imports = true
reorder_imports_in_group = true
spaces_around_ranges = true
use_try_shorthand = true
where_density = "Tall"
where_style = "Legacy"
wrap_match_arms = false
write_mode = "Overwrite"

View File

@ -18,9 +18,15 @@ fn main() {
} else {
// Try to initialize submodules. Don't care if it fails, since this code also runs for
// the crates.io package.
let _ = Command::new("git").arg("submodule").arg("update").arg("--init").status();
let _ = Command::new("git")
.arg("submodule")
.arg("update")
.arg("--init")
.status();
cmake::build("glslang");
Path::new(&env::var("OUT_DIR").unwrap()).join("bin").join("glslangValidator")
Path::new(&env::var("OUT_DIR").unwrap())
.join("bin")
.join("glslangValidator")
};
if let Err(_) = fs::hard_link(&path, &out_file) {

View File

@ -43,11 +43,16 @@ fn compile_inner<'a, I>(shaders: I) -> Result<SpirvOutput, String>
};
let file_path = temp_dir.path().join(format!("{}{}", num, extension));
File::create(&file_path).unwrap().write_all(source.as_bytes()).unwrap();
File::create(&file_path)
.unwrap()
.write_all(source.as_bytes())
.unwrap();
command.arg(file_path);
}
let output = command.output().expect("Failed to execute glslangValidator");
let output = command
.output()
.expect("Failed to execute glslangValidator");
if output.status.success() {
let spirv_output = File::open(output_file).expect("failed to open SPIR-V output file");

View File

@ -29,10 +29,14 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
// Looping to find all the elements that have the `DescriptorSet` decoration.
for instruction in doc.instructions.iter() {
let (variable_id, descriptor_set) = match instruction {
&parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationDescriptorSet, ref params } => {
&parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationDescriptorSet,
ref params,
} => {
(target_id, params[0])
},
_ => continue
_ => continue,
};
// Find which type is pointed to by this variable.
@ -41,36 +45,52 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
let name = ::name_from_id(doc, variable_id);
// Find the binding point of this descriptor.
let binding = doc.instructions.iter().filter_map(|i| {
match i {
&parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationBinding, ref params } if target_id == variable_id => {
Some(params[0])
},
_ => None, // TODO: other types
}
}).next().expect(&format!("Uniform `{}` is missing a binding", name));
let binding = doc.instructions
.iter()
.filter_map(|i| {
match i {
&parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationBinding,
ref params,
} if target_id == variable_id => {
Some(params[0])
},
_ => None, // TODO: other types
}
})
.next()
.expect(&format!("Uniform `{}` is missing a binding", name));
// Find informations about the kind of binding for this descriptor.
let (desc_ty, readonly, array_count) = descriptor_infos(doc, pointed_ty, false).expect(&format!("Couldn't find relevant type for uniform `{}` (type {}, maybe unimplemented)", name, pointed_ty));
let (desc_ty, readonly, array_count) = descriptor_infos(doc, pointed_ty, false)
.expect(&format!("Couldn't find relevant type for uniform `{}` (type {}, maybe \
unimplemented)",
name,
pointed_ty));
descriptors.push(Descriptor {
name: name,
desc_ty: desc_ty,
set: descriptor_set,
binding: binding,
array_count: array_count,
readonly: readonly,
});
name: name,
desc_ty: desc_ty,
set: descriptor_set,
binding: binding,
array_count: array_count,
readonly: readonly,
});
}
// Looping to find all the push constant structs.
let mut push_constants_size = 0;
for instruction in doc.instructions.iter() {
let type_id = match instruction {
&parse::Instruction::TypePointer { type_id, storage_class: enums::StorageClass::StorageClassPushConstant, .. } => {
&parse::Instruction::TypePointer {
type_id,
storage_class: enums::StorageClass::StorageClassPushConstant,
..
} => {
type_id
},
_ => continue
_ => continue,
};
let (_, size, _) = ::structs::type_from_id(doc, type_id);
@ -79,54 +99,76 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
}
// Writing the body of the `descriptor` method.
let descriptor_body = descriptors.iter().map(|d| {
format!("({set}, {binding}) => Some(DescriptorDesc {{
let descriptor_body = descriptors
.iter()
.map(|d| {
format!(
"({set}, {binding}) => Some(DescriptorDesc {{
ty: {desc_ty},
array_count: {array_count},
stages: self.0.clone(),
readonly: {readonly},
}}),", set = d.set, binding = d.binding, desc_ty = d.desc_ty, array_count = d.array_count,
readonly = if d.readonly { "true" } else { "false" })
}}),",
set = d.set,
binding = d.binding,
desc_ty = d.desc_ty,
array_count = d.array_count,
readonly = if d.readonly { "true" } else { "false" }
)
}).collect::<Vec<_>>().concat();
})
.collect::<Vec<_>>()
.concat();
let num_sets = 1 + descriptors.iter().fold(0, |s, d| cmp::max(s, d.set));
// Writing the body of the `num_bindings_in_set` method.
let num_bindings_in_set_body = {
(0 .. num_sets).map(|set| {
let num = 1 + descriptors.iter().filter(|d| d.set == set)
.fold(0, |s, d| cmp::max(s, d.binding));
format!("{set} => Some({num}),", set = set, num = num)
}).collect::<Vec<_>>().concat()
(0 .. num_sets)
.map(|set| {
let num = 1 +
descriptors
.iter()
.filter(|d| d.set == set)
.fold(0, |s, d| cmp::max(s, d.binding));
format!("{set} => Some({num}),", set = set, num = num)
})
.collect::<Vec<_>>()
.concat()
};
// Writing the body of the `descriptor_by_name_body` method.
let descriptor_by_name_body = descriptors.iter().map(|d| {
format!(r#"{name:?} => Some(({set}, {binding})),"#,
name = d.name, set = d.set, binding = d.binding)
}).collect::<Vec<_>>().concat();
let descriptor_by_name_body = descriptors
.iter()
.map(|d| {
format!(r#"{name:?} => Some(({set}, {binding})),"#,
name = d.name,
set = d.set,
binding = d.binding)
})
.collect::<Vec<_>>()
.concat();
// Writing the body of the `num_push_constants_ranges` method.
let num_push_constants_ranges_body = {
if push_constants_size == 0 {
"0"
} else {
"1"
}
if push_constants_size == 0 { "0" } else { "1" }
};
// Writing the body of the `push_constants_range` method.
let push_constants_range_body = format!(r#"
let push_constants_range_body = format!(
r#"
if num != 0 || {pc_size} == 0 {{ return None; }}
Some(PipelineLayoutDescPcRange {{
offset: 0, // FIXME: not necessarily true
size: {pc_size},
stages: ShaderStages::all(), // FIXME: wrong
}})
"#, pc_size = push_constants_size);
"#,
pc_size = push_constants_size
);
format!(r#"
format!(
r#"
#[derive(Debug, Clone)]
pub struct Layout(ShaderStages);
@ -168,32 +210,45 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
}}
}}
}}
"#, num_sets = num_sets, num_bindings_in_set_body = num_bindings_in_set_body,
descriptor_by_name_body = descriptor_by_name_body, descriptor_body = descriptor_body,
num_push_constants_ranges_body = num_push_constants_ranges_body,
push_constants_range_body = push_constants_range_body)
"#,
num_sets = num_sets,
num_bindings_in_set_body = num_bindings_in_set_body,
descriptor_by_name_body = descriptor_by_name_body,
descriptor_body = descriptor_body,
num_push_constants_ranges_body = num_push_constants_ranges_body,
push_constants_range_body = push_constants_range_body
)
}
/// Assumes that `variable` is a variable with a `TypePointer` and returns the id of the pointed
/// type.
fn pointer_variable_ty(doc: &parse::Spirv, variable: u32) -> u32 {
let var_ty = doc.instructions.iter().filter_map(|i| {
match i {
&parse::Instruction::Variable { result_type_id, result_id, .. } if result_id == variable => {
Some(result_type_id)
},
_ => None
}
}).next().unwrap();
let var_ty = doc.instructions
.iter()
.filter_map(|i| match i {
&parse::Instruction::Variable {
result_type_id,
result_id,
..
} if result_id == variable => {
Some(result_type_id)
},
_ => None,
})
.next()
.unwrap();
doc.instructions.iter().filter_map(|i| {
match i {
&parse::Instruction::TypePointer { result_id, type_id, .. } if result_id == var_ty => {
Some(type_id)
},
_ => None
}
}).next().unwrap()
doc.instructions
.iter()
.filter_map(|i| match i {
&parse::Instruction::TypePointer { result_id, type_id, .. }
if result_id == var_ty => {
Some(type_id)
},
_ => None,
})
.next()
.unwrap()
}
/// Returns a `DescriptorDescTy` constructor, a bool indicating whether the descriptor is
@ -201,8 +256,7 @@ fn pointer_variable_ty(doc: &parse::Spirv, variable: u32) -> u32 {
///
/// See also section 14.5.2 of the Vulkan specs: Descriptor Set Interface
fn descriptor_infos(doc: &parse::Spirv, pointed_ty: u32, force_combined_image_sampled: bool)
-> Option<(String, bool, u64)>
{
-> Option<(String, bool, u64)> {
doc.instructions.iter().filter_map(|i| {
match i {
&parse::Instruction::TypeStruct { result_id, .. } if result_id == pointed_ty => {

View File

@ -10,74 +10,117 @@
use enums;
use parse;
use is_builtin;
use name_from_id;
use location_decoration;
use format_from_id;
use is_builtin;
use location_decoration;
use name_from_id;
pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -> (String, String) {
let (execution, ep_name, interface) = match instruction {
&parse::Instruction::EntryPoint { ref execution, ref name, ref interface, .. } => {
&parse::Instruction::EntryPoint {
ref execution,
ref name,
ref interface,
..
} => {
(execution, name, interface)
},
_ => unreachable!()
_ => unreachable!(),
};
let capitalized_ep_name: String = ep_name.chars().take(1).flat_map(|c| c.to_uppercase())
.chain(ep_name.chars().skip(1)).collect();
let capitalized_ep_name: String = ep_name
.chars()
.take(1)
.flat_map(|c| c.to_uppercase())
.chain(ep_name.chars().skip(1))
.collect();
let interface_structs = write_interface_structs(doc, &capitalized_ep_name, interface,
match *execution {
enums::ExecutionModel::ExecutionModelTessellationControl => true,
enums::ExecutionModel::ExecutionModelTessellationEvaluation => true,
enums::ExecutionModel::ExecutionModelGeometry => true,
_ => false
},
match *execution {
enums::ExecutionModel::ExecutionModelTessellationControl => true,
_ => false,
});
let interface_structs =
write_interface_structs(doc,
&capitalized_ep_name,
interface,
match *execution {
enums::ExecutionModel::ExecutionModelTessellationControl =>
true,
enums::ExecutionModel::ExecutionModelTessellationEvaluation =>
true,
enums::ExecutionModel::ExecutionModelGeometry => true,
_ => false,
},
match *execution {
enums::ExecutionModel::ExecutionModelTessellationControl =>
true,
_ => false,
});
let (ty, f_call) = match *execution {
enums::ExecutionModel::ExecutionModelVertex => {
let t = format!("::vulkano::pipeline::shader::VertexShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
let f = format!("vertex_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ vertex: true, .. ShaderStages::none() }}))", capitalized_ep_name);
let t = format!("::vulkano::pipeline::shader::VertexShaderEntryPoint<(), {0}Input, \
{0}Output, Layout>",
capitalized_ep_name);
let f = format!("vertex_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() \
as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ vertex: \
true, .. ShaderStages::none() }}))",
capitalized_ep_name);
(t, f)
},
enums::ExecutionModel::ExecutionModelTessellationControl => {
let t = format!("::vulkano::pipeline::shader::TessControlShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
let f = format!("tess_control_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ tessellation_control: true, .. ShaderStages::none() }}))", capitalized_ep_name);
let t = format!("::vulkano::pipeline::shader::TessControlShaderEntryPoint<(), \
{0}Input, {0}Output, Layout>",
capitalized_ep_name);
let f = format!("tess_control_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
tessellation_control: true, .. ShaderStages::none() }}))",
capitalized_ep_name);
(t, f)
},
enums::ExecutionModel::ExecutionModelTessellationEvaluation => {
let t = format!("::vulkano::pipeline::shader::TessEvaluationShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
let f = format!("tess_evaluation_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ tessellation_evaluation: true, .. ShaderStages::none() }}))", capitalized_ep_name);
let t = format!("::vulkano::pipeline::shader::TessEvaluationShaderEntryPoint<(), \
{0}Input, {0}Output, Layout>",
capitalized_ep_name);
let f = format!("tess_evaluation_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
tessellation_evaluation: true, .. ShaderStages::none() }}))",
capitalized_ep_name);
(t, f)
},
enums::ExecutionModel::ExecutionModelGeometry => {
let t = format!("::vulkano::pipeline::shader::GeometryShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
let f = format!("geometry_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ geometry: true, .. ShaderStages::none() }}))", capitalized_ep_name);
let t = format!("::vulkano::pipeline::shader::GeometryShaderEntryPoint<(), {0}Input, \
{0}Output, Layout>",
capitalized_ep_name);
let f = format!("geometry_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
geometry: true, .. ShaderStages::none() }}))",
capitalized_ep_name);
(t, f)
},
enums::ExecutionModel::ExecutionModelFragment => {
let t = format!("::vulkano::pipeline::shader::FragmentShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
let f = format!("fragment_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ fragment: true, .. ShaderStages::none() }}))", capitalized_ep_name);
let t = format!("::vulkano::pipeline::shader::FragmentShaderEntryPoint<(), {0}Input, \
{0}Output, Layout>",
capitalized_ep_name);
let f = format!("fragment_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
fragment: true, .. ShaderStages::none() }}))",
capitalized_ep_name);
(t, f)
},
enums::ExecutionModel::ExecutionModelGLCompute => {
(format!("::vulkano::pipeline::shader::ComputeShaderEntryPoint<(), Layout>"),
format!("compute_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), Layout(ShaderStages {{ compute: true, .. ShaderStages::none() }}))"))
format!("compute_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as \
*const _), Layout(ShaderStages {{ compute: true, .. ShaderStages::none() \
}}))"))
},
enums::ExecutionModel::ExecutionModelKernel => panic!("Kernels are not supported"),
};
let entry_point = format!(r#"
let entry_point = format!(
r#"
/// Returns a logical struct describing the entry point named `{ep_name}`.
#[inline]
#[allow(unsafe_code)]
@ -88,18 +131,24 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -
self.shader.{f_call}
}}
}}
"#, ep_name = ep_name, ep_name_lenp1 = ep_name.chars().count() + 1, ty = ty,
encoded_ep_name = ep_name.chars().map(|c| (c as u32).to_string())
.collect::<Vec<String>>().join(", "),
f_call = f_call);
"#,
ep_name = ep_name,
ep_name_lenp1 = ep_name.chars().count() + 1,
ty = ty,
encoded_ep_name = ep_name
.chars()
.map(|c| (c as u32).to_string())
.collect::<Vec<String>>()
.join(", "),
f_call = f_call
);
(interface_structs, entry_point)
}
fn write_interface_structs(doc: &parse::Spirv, capitalized_ep_name: &str, interface: &[u32],
ignore_first_array_in: bool, ignore_first_array_out: bool)
-> String
{
-> String {
let mut input_elements = Vec::new();
let mut output_elements = Vec::new();
@ -107,30 +156,40 @@ fn write_interface_structs(doc: &parse::Spirv, capitalized_ep_name: &str, interf
for interface in interface.iter() {
for i in doc.instructions.iter() {
match i {
&parse::Instruction::Variable { result_type_id, result_id, ref storage_class, .. }
if &result_id == interface =>
{
&parse::Instruction::Variable {
result_type_id,
result_id,
ref storage_class,
..
} if &result_id == interface => {
if is_builtin(doc, result_id) {
continue;
}
let (to_write, ignore_first_array) = match storage_class {
&enums::StorageClass::StorageClassInput => (&mut input_elements, ignore_first_array_in),
&enums::StorageClass::StorageClassOutput => (&mut output_elements, ignore_first_array_out),
_ => continue
&enums::StorageClass::StorageClassInput => (&mut input_elements,
ignore_first_array_in),
&enums::StorageClass::StorageClassOutput => (&mut output_elements,
ignore_first_array_out),
_ => continue,
};
let name = name_from_id(doc, result_id);
if name == "__unnamed" { continue; } // FIXME: hack
if name == "__unnamed" {
continue;
} // FIXME: hack
let loc = match location_decoration(doc, result_id) {
Some(l) => l,
None => panic!("Attribute `{}` (id {}) is missing a location", name, result_id)
None => panic!("Attribute `{}` (id {}) is missing a location",
name,
result_id),
};
to_write.push((loc, name, format_from_id(doc, result_type_id, ignore_first_array)));
to_write
.push((loc, name, format_from_id(doc, result_type_id, ignore_first_array)));
},
_ => ()
_ => (),
}
}
}
@ -139,24 +198,34 @@ fn write_interface_structs(doc: &parse::Spirv, capitalized_ep_name: &str, interf
&write_interface_struct(&format!("{}Output", capitalized_ep_name), &output_elements)
}
fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String, usize))]) -> String {
fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String, usize))])
-> String {
// Checking for overlapping elements.
for (offset, &(loc, ref name, (_, loc_len))) in attributes.iter().enumerate() {
for &(loc2, ref name2, (_, loc_len2)) in attributes.iter().skip(offset + 1) {
if loc == loc2 || (loc < loc2 && loc + loc_len as u32 > loc2) ||
(loc2 < loc && loc2 + loc_len2 as u32 > loc)
(loc2 < loc && loc2 + loc_len2 as u32 > loc)
{
panic!("The locations of attributes `{}` (start={}, size={}) \
and `{}` (start={}, size={}) overlap",
name, loc, loc_len, name2, loc2, loc_len2);
name,
loc,
loc_len,
name2,
loc2,
loc_len2);
}
}
}
let body = attributes.iter().enumerate().map(|(num, &(loc, ref name, (ref ty, num_locs)))| {
assert!(num_locs >= 1);
let body = attributes
.iter()
.enumerate()
.map(|(num, &(loc, ref name, (ref ty, num_locs)))| {
assert!(num_locs >= 1);
format!("if self.num == {} {{
format!(
"if self.num == {} {{
self.num += 1;
return Some(::vulkano::pipeline::shader::ShaderInterfaceDefEntry {{
@ -164,39 +233,61 @@ fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String
format: ::vulkano::format::Format::{},
name: Some(::std::borrow::Cow::Borrowed(\"{}\"))
}});
}}", num, loc, loc as usize + num_locs, ty, name)
}).collect::<Vec<_>>().join("");
}}",
num,
loc,
loc as usize + num_locs,
ty,
name
)
})
.collect::<Vec<_>>()
.join("");
format!("
format!(
"
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct {name};
#[allow(unsafe_code)]
unsafe impl ::vulkano::pipeline::shader::ShaderInterfaceDef for {name} {{
\
#[allow(unsafe_code)]
unsafe impl ::vulkano::pipeline::shader::ShaderInterfaceDef for \
{name} {{
type Iter = {name}Iter;
fn elements(&self) -> {name}Iter {{
{name}Iter {{ num: 0 }}
\
{name}Iter {{ num: 0 }}
}}
}}
#[derive(Debug, Copy, Clone)]
pub struct {name}Iter {{ num: u16 }}
\
pub struct {name}Iter {{ num: u16 }}
impl Iterator for {name}Iter {{
type Item = ::vulkano::pipeline::shader::ShaderInterfaceDefEntry;
type \
Item = ::vulkano::pipeline::shader::ShaderInterfaceDefEntry;
#[inline]
fn next(&mut self) -> Option<Self::Item> {{
\
fn next(&mut self) -> Option<Self::Item> {{
{body}
None
}}
\
}}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {{
let len = ({len} - self.num) as usize;
\
let len = ({len} - self.num) as usize;
(len, Some(len))
}}
}}
\
}}
impl ExactSizeIterator for {name}Iter {{}}
", name = struct_name, body = body, len = attributes.len())
",
name = struct_name,
body = body,
len = attributes.len()
)
}

View File

@ -17,8 +17,8 @@ use std::io::Read;
use std::io::Write;
use std::path::Path;
pub use parse::ParseError;
pub use glsl_to_spirv::ShaderType;
pub use parse::ParseError;
mod descriptor_sets;
mod entry_point;
@ -43,14 +43,16 @@ pub fn build_glsl_shaders<'a, I>(shaders: I)
let shader_content = {
let mut s = String::new();
File::open(shader).expect("failed to open shader").read_to_string(&mut s)
.expect("failed to read shader content");
File::open(shader)
.expect("failed to open shader")
.read_to_string(&mut s)
.expect("failed to read shader content");
s
};
fs::create_dir_all(&dest.join("shaders").join(shader.parent().unwrap())).unwrap();
let mut file_output = File::create(&dest.join("shaders").join(shader))
.expect("failed to open shader output");
.expect("failed to open shader output");
let content = match glsl_to_spirv::compile(&shader_content, ty) {
Ok(compiled) => compiled,
@ -65,13 +67,14 @@ pub fn reflect<R>(name: &str, mut spirv: R) -> Result<String, Error>
where R: Read
{
let mut data = Vec::new();
try!(spirv.read_to_end(&mut data));
spirv.read_to_end(&mut data)?;
// now parsing the document
let doc = try!(parse::parse_spirv(&data));
let doc = parse::parse_spirv(&data)?;
let mut output = String::new();
output.push_str(r#"
output.push_str(
r#"
#[allow(unused_imports)]
use std::sync::Arc;
#[allow(unused_imports)]
@ -109,16 +112,19 @@ pub fn reflect<R>(name: &str, mut spirv: R) -> Result<String, Error>
use vulkano::descriptor::pipeline_layout::PipelineLayoutDescNames;
#[allow(unused_imports)]
use vulkano::descriptor::pipeline_layout::PipelineLayoutDescPcRange;
"#);
"#,
);
{
// contains the data that was passed as input to this function
let spirv_data = data.iter().map(|&byte| byte.to_string())
.collect::<Vec<String>>()
.join(", ");
let spirv_data = data.iter()
.map(|&byte| byte.to_string())
.collect::<Vec<String>>()
.join(", ");
// writing the header
output.push_str(&format!(r#"
output.push_str(&format!(
r#"
pub struct {name} {{
shader: ::std::sync::Arc<::vulkano::pipeline::shader::ShaderModule>,
}}
@ -131,23 +137,29 @@ impl {name} {{
-> Result<{name}, ::vulkano::OomError>
{{
"#, name = name));
"#,
name = name
));
// checking whether each required capability is enabled in the vulkan device
for i in doc.instructions.iter() {
if let &parse::Instruction::Capability(ref cap) = i {
if let Some(cap) = capability_name(cap) {
output.push_str(&format!(r#"
output.push_str(&format!(
r#"
if !device.enabled_features().{cap} {{
panic!("capability {{:?}} not enabled", "{cap}") // FIXME: error
//return Err(CapabilityNotEnabled);
}}"#, cap = cap));
}}"#,
cap = cap
));
}
}
}
// follow-up of the header
output.push_str(&format!(r#"
output.push_str(&format!(
r#"
unsafe {{
let data = [{spirv_data}];
@ -163,7 +175,10 @@ impl {name} {{
pub fn module(&self) -> &::std::sync::Arc<::vulkano::pipeline::shader::ShaderModule> {{
&self.shader
}}
"#, name = name, spirv_data = spirv_data));
"#,
name = name,
spirv_data = spirv_data
));
// writing one method for each entry point of this module
let mut outside_impl = String::new();
@ -176,9 +191,11 @@ impl {name} {{
}
// footer
output.push_str(&format!(r#"
output.push_str(&format!(
r#"
}}
"#));
"#
));
output.push_str(&outside_impl);
@ -222,7 +239,11 @@ impl From<ParseError> for Error {
fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -> (String, usize) {
for instruction in doc.instructions.iter() {
match instruction {
&parse::Instruction::TypeInt { result_id, width, signedness } if result_id == searched => {
&parse::Instruction::TypeInt {
result_id,
width,
signedness,
} if result_id == searched => {
assert!(!ignore_first_array);
return (match (width, signedness) {
(8, true) => "R8Sint",
@ -233,18 +254,24 @@ fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -
(32, false) => "R32Uint",
(64, true) => "R64Sint",
(64, false) => "R64Uint",
_ => panic!()
}.to_owned(), 1);
_ => panic!(),
}.to_owned(),
1);
},
&parse::Instruction::TypeFloat { result_id, width } if result_id == searched => {
assert!(!ignore_first_array);
return (match width {
32 => "R32Sfloat",
64 => "R64Sfloat",
_ => panic!()
}.to_owned(), 1);
_ => panic!(),
}.to_owned(),
1);
},
&parse::Instruction::TypeVector { result_id, component_id, count } if result_id == searched => {
&parse::Instruction::TypeVector {
result_id,
component_id,
count,
} if result_id == searched => {
assert!(!ignore_first_array);
let (format, sz) = format_from_id(doc, component_id, false);
assert!(format.starts_with("R32"));
@ -252,37 +279,55 @@ fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -
let format = if count == 1 {
format
} else if count == 2 {
format!("R32G32{}", &format[3..])
format!("R32G32{}", &format[3 ..])
} else if count == 3 {
format!("R32G32B32{}", &format[3..])
format!("R32G32B32{}", &format[3 ..])
} else if count == 4 {
format!("R32G32B32A32{}", &format[3..])
format!("R32G32B32A32{}", &format[3 ..])
} else {
panic!("Found vector type with more than 4 elements")
};
return (format, sz);
},
&parse::Instruction::TypeMatrix { result_id, column_type_id, column_count } if result_id == searched => {
&parse::Instruction::TypeMatrix {
result_id,
column_type_id,
column_count,
} if result_id == searched => {
assert!(!ignore_first_array);
let (format, sz) = format_from_id(doc, column_type_id, false);
return (format, sz * column_count as usize);
},
&parse::Instruction::TypeArray { result_id, type_id, length_id } if result_id == searched => {
&parse::Instruction::TypeArray {
result_id,
type_id,
length_id,
} if result_id == searched => {
if ignore_first_array {
return format_from_id(doc, type_id, false);
}
let (format, sz) = format_from_id(doc, type_id, false);
let len = doc.instructions.iter().filter_map(|e| {
match e { &parse::Instruction::Constant { result_id, ref data, .. } if result_id == length_id => Some(data.clone()), _ => None }
}).next().expect("failed to find array length");
let len = doc.instructions
.iter()
.filter_map(|e| match e {
&parse::Instruction::Constant {
result_id,
ref data,
..
} if result_id == length_id => Some(data.clone()),
_ => None,
})
.next()
.expect("failed to find array length");
let len = len.iter().rev().fold(0u64, |a, &b| (a << 32) | b as u64);
return (format, sz * len as usize);
},
&parse::Instruction::TypePointer { result_id, type_id, .. } if result_id == searched => {
&parse::Instruction::TypePointer { result_id, type_id, .. }
if result_id == searched => {
return format_from_id(doc, type_id, ignore_first_array);
},
_ => ()
_ => (),
}
}
@ -290,8 +335,13 @@ fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -
}
fn name_from_id(doc: &parse::Spirv, searched: u32) -> String {
doc.instructions.iter().filter_map(|i| {
if let &parse::Instruction::Name { target_id, ref name } = i {
doc.instructions
.iter()
.filter_map(|i| if let &parse::Instruction::Name {
target_id,
ref name,
} = i
{
if target_id == searched {
Some(name.clone())
} else {
@ -299,14 +349,21 @@ fn name_from_id(doc: &parse::Spirv, searched: u32) -> String {
}
} else {
None
}
}).next().and_then(|n| if !n.is_empty() { Some(n) } else { None })
.unwrap_or("__unnamed".to_owned())
})
.next()
.and_then(|n| if !n.is_empty() { Some(n) } else { None })
.unwrap_or("__unnamed".to_owned())
}
fn member_name_from_id(doc: &parse::Spirv, searched: u32, searched_member: u32) -> String {
doc.instructions.iter().filter_map(|i| {
if let &parse::Instruction::MemberName { target_id, member, ref name } = i {
doc.instructions
.iter()
.filter_map(|i| if let &parse::Instruction::MemberName {
target_id,
member,
ref name,
} = i
{
if target_id == searched && member == searched_member {
Some(name.clone())
} else {
@ -314,14 +371,21 @@ fn member_name_from_id(doc: &parse::Spirv, searched: u32, searched_member: u32)
}
} else {
None
}
}).next().and_then(|n| if !n.is_empty() { Some(n) } else { None })
.unwrap_or("__unnamed".to_owned())
})
.next()
.and_then(|n| if !n.is_empty() { Some(n) } else { None })
.unwrap_or("__unnamed".to_owned())
}
fn location_decoration(doc: &parse::Spirv, searched: u32) -> Option<u32> {
doc.instructions.iter().filter_map(|i| {
if let &parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationLocation, ref params } = i {
doc.instructions
.iter()
.filter_map(|i| if let &parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationLocation,
ref params,
} = i
{
if target_id == searched {
Some(params[0])
} else {
@ -329,33 +393,39 @@ fn location_decoration(doc: &parse::Spirv, searched: u32) -> Option<u32> {
}
} else {
None
}
}).next()
})
.next()
}
/// Returns true if a `BuiltIn` decorator is applied on an id.
fn is_builtin(doc: &parse::Spirv, id: u32) -> bool {
for instruction in &doc.instructions {
match *instruction {
parse::Instruction::Decorate { target_id,
decoration: enums::Decoration::DecorationBuiltIn,
.. } if target_id == id =>
{
parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationBuiltIn,
..
} if target_id == id => {
return true;
},
parse::Instruction::MemberDecorate { target_id,
decoration: enums::Decoration::DecorationBuiltIn,
.. } if target_id == id =>
{
parse::Instruction::MemberDecorate {
target_id,
decoration: enums::Decoration::DecorationBuiltIn,
..
} if target_id == id => {
return true;
},
_ => ()
_ => (),
}
}
for instruction in &doc.instructions {
match *instruction {
parse::Instruction::Variable { result_type_id, result_id, .. } if result_id == id => {
parse::Instruction::Variable {
result_type_id,
result_id,
..
} if result_id == id => {
return is_builtin(doc, result_type_id);
},
parse::Instruction::TypeArray { result_id, type_id, .. } if result_id == id => {
@ -364,15 +434,20 @@ fn is_builtin(doc: &parse::Spirv, id: u32) -> bool {
parse::Instruction::TypeRuntimeArray { result_id, type_id } if result_id == id => {
return is_builtin(doc, type_id);
},
parse::Instruction::TypeStruct { result_id, ref member_types } if result_id == id => {
parse::Instruction::TypeStruct {
result_id,
ref member_types,
} if result_id == id => {
for &mem in member_types {
if is_builtin(doc, mem) { return true; }
if is_builtin(doc, mem) {
return true;
}
}
},
parse::Instruction::TypePointer { result_id, type_id, .. } if result_id == id => {
return is_builtin(doc, type_id);
},
_ => ()
_ => (),
}
}
@ -407,14 +482,21 @@ fn capability_name(cap: &enums::Capability) -> Option<&'static str> {
enums::Capability::CapabilityLiteralSampler => panic!(), // not supported
enums::Capability::CapabilityAtomicStorage => panic!(), // not supported
enums::Capability::CapabilityInt16 => Some("shader_int16"),
enums::Capability::CapabilityTessellationPointSize => Some("shader_tessellation_and_geometry_point_size"),
enums::Capability::CapabilityGeometryPointSize => Some("shader_tessellation_and_geometry_point_size"),
enums::Capability::CapabilityTessellationPointSize =>
Some("shader_tessellation_and_geometry_point_size"),
enums::Capability::CapabilityGeometryPointSize =>
Some("shader_tessellation_and_geometry_point_size"),
enums::Capability::CapabilityImageGatherExtended => Some("shader_image_gather_extended"),
enums::Capability::CapabilityStorageImageMultisample => Some("shader_storage_image_multisample"),
enums::Capability::CapabilityUniformBufferArrayDynamicIndexing => Some("shader_uniform_buffer_array_dynamic_indexing"),
enums::Capability::CapabilitySampledImageArrayDynamicIndexing => Some("shader_sampled_image_array_dynamic_indexing"),
enums::Capability::CapabilityStorageBufferArrayDynamicIndexing => Some("shader_storage_buffer_array_dynamic_indexing"),
enums::Capability::CapabilityStorageImageArrayDynamicIndexing => Some("shader_storage_image_array_dynamic_indexing"),
enums::Capability::CapabilityStorageImageMultisample =>
Some("shader_storage_image_multisample"),
enums::Capability::CapabilityUniformBufferArrayDynamicIndexing =>
Some("shader_uniform_buffer_array_dynamic_indexing"),
enums::Capability::CapabilitySampledImageArrayDynamicIndexing =>
Some("shader_sampled_image_array_dynamic_indexing"),
enums::Capability::CapabilityStorageBufferArrayDynamicIndexing =>
Some("shader_storage_buffer_array_dynamic_indexing"),
enums::Capability::CapabilityStorageImageArrayDynamicIndexing =>
Some("shader_storage_image_array_dynamic_indexing"),
enums::Capability::CapabilityClipDistance => Some("shader_clip_distance"),
enums::Capability::CapabilityCullDistance => Some("shader_cull_distance"),
enums::Capability::CapabilityImageCubeArray => Some("image_cube_array"),
@ -432,14 +514,17 @@ fn capability_name(cap: &enums::Capability) -> Option<&'static str> {
enums::Capability::CapabilitySampledBuffer => None, // always supported
enums::Capability::CapabilityImageBuffer => None, // always supported
enums::Capability::CapabilityImageMSArray => Some("shader_storage_image_multisample"),
enums::Capability::CapabilityStorageImageExtendedFormats => Some("shader_storage_image_extended_formats"),
enums::Capability::CapabilityStorageImageExtendedFormats =>
Some("shader_storage_image_extended_formats"),
enums::Capability::CapabilityImageQuery => None, // always supported
enums::Capability::CapabilityDerivativeControl => None, // always supported
enums::Capability::CapabilityInterpolationFunction => Some("sample_rate_shading"),
enums::Capability::CapabilityTransformFeedback => panic!(), // not supported
enums::Capability::CapabilityGeometryStreams => panic!(), // not supported
enums::Capability::CapabilityStorageImageReadWithoutFormat => Some("shader_storage_image_read_without_format"),
enums::Capability::CapabilityStorageImageWriteWithoutFormat => Some("shader_storage_image_write_without_format"),
enums::Capability::CapabilityStorageImageReadWithoutFormat =>
Some("shader_storage_image_read_without_format"),
enums::Capability::CapabilityStorageImageWriteWithoutFormat =>
Some("shader_storage_image_write_without_format"),
enums::Capability::CapabilityMultiViewport => Some("multi_viewport"),
}
}

View File

@ -19,15 +19,21 @@ pub fn parse_spirv(data: &[u8]) -> Result<Spirv, ParseError> {
// on the magic number at the start of the file
let data = if data[0] == 0x07 && data[1] == 0x23 && data[2] == 0x02 && data[3] == 0x03 {
// big endian
data.chunks(4).map(|c| {
((c[0] as u32) << 24) | ((c[1] as u32) << 16) | ((c[2] as u32) << 8) | c[3] as u32
}).collect::<Vec<_>>()
data.chunks(4)
.map(|c| {
((c[0] as u32) << 24) | ((c[1] as u32) << 16) | ((c[2] as u32) << 8) |
c[3] as u32
})
.collect::<Vec<_>>()
} else if data[3] == 0x07 && data[2] == 0x23 && data[1] == 0x02 && data[0] == 0x03 {
// little endian
data.chunks(4).map(|c| {
((c[3] as u32) << 24) | ((c[2] as u32) << 16) | ((c[1] as u32) << 8) | c[0] as u32
}).collect::<Vec<_>>()
data.chunks(4)
.map(|c| {
((c[3] as u32) << 24) | ((c[2] as u32) << 16) | ((c[1] as u32) << 8) |
c[0] as u32
})
.collect::<Vec<_>>()
} else {
return Err(ParseError::MissingHeader);
@ -52,9 +58,9 @@ fn parse_u32s(i: &[u32]) -> Result<Spirv, ParseError> {
let instructions = {
let mut ret = Vec::new();
let mut i = &i[5..];
let mut i = &i[5 ..];
while i.len() >= 1 {
let (instruction, rest) = try!(parse_instruction(i));
let (instruction, rest) = parse_instruction(i)?;
ret.push(instruction);
i = rest;
}
@ -62,10 +68,10 @@ fn parse_u32s(i: &[u32]) -> Result<Spirv, ParseError> {
};
Ok(Spirv {
version: version,
bound: i[3],
instructions: instructions,
})
version: version,
bound: i[3],
instructions: instructions,
})
}
/// Error that can happen when parsing.
@ -89,30 +95,90 @@ pub enum Instruction {
Unknown(u16, Vec<u32>),
Nop,
Name { target_id: u32, name: String },
MemberName { target_id: u32, member: u32, name: String },
MemberName {
target_id: u32,
member: u32,
name: String,
},
ExtInstImport { result_id: u32, name: String },
MemoryModel(AddressingModel, MemoryModel),
EntryPoint { execution: ExecutionModel, id: u32, name: String, interface: Vec<u32> },
EntryPoint {
execution: ExecutionModel,
id: u32,
name: String,
interface: Vec<u32>,
},
Capability(Capability),
TypeVoid { result_id: u32 },
TypeBool { result_id: u32 },
TypeInt { result_id: u32, width: u32, signedness: bool },
TypeInt {
result_id: u32,
width: u32,
signedness: bool,
},
TypeFloat { result_id: u32, width: u32 },
TypeVector { result_id: u32, component_id: u32, count: u32 },
TypeMatrix { result_id: u32, column_type_id: u32, column_count: u32 },
TypeImage { result_id: u32, sampled_type_id: u32, dim: Dim, depth: Option<bool>, arrayed: bool, ms: bool, sampled: Option<bool>, format: ImageFormat, access: Option<AccessQualifier> },
TypeVector {
result_id: u32,
component_id: u32,
count: u32,
},
TypeMatrix {
result_id: u32,
column_type_id: u32,
column_count: u32,
},
TypeImage {
result_id: u32,
sampled_type_id: u32,
dim: Dim,
depth: Option<bool>,
arrayed: bool,
ms: bool,
sampled: Option<bool>,
format: ImageFormat,
access: Option<AccessQualifier>,
},
TypeSampler { result_id: u32 },
TypeSampledImage { result_id: u32, image_type_id: u32 },
TypeArray { result_id: u32, type_id: u32, length_id: u32 },
TypeArray {
result_id: u32,
type_id: u32,
length_id: u32,
},
TypeRuntimeArray { result_id: u32, type_id: u32 },
TypeStruct { result_id: u32, member_types: Vec<u32> },
TypeStruct {
result_id: u32,
member_types: Vec<u32>,
},
TypeOpaque { result_id: u32, name: String },
TypePointer { result_id: u32, storage_class: StorageClass, type_id: u32 },
Constant { result_type_id: u32, result_id: u32, data: Vec<u32> },
TypePointer {
result_id: u32,
storage_class: StorageClass,
type_id: u32,
},
Constant {
result_type_id: u32,
result_id: u32,
data: Vec<u32>,
},
FunctionEnd,
Variable { result_type_id: u32, result_id: u32, storage_class: StorageClass, initializer: Option<u32> },
Decorate { target_id: u32, decoration: Decoration, params: Vec<u32> },
MemberDecorate { target_id: u32, member: u32, decoration: Decoration, params: Vec<u32> },
Variable {
result_type_id: u32,
result_id: u32,
storage_class: StorageClass,
initializer: Option<u32>,
},
Decorate {
target_id: u32,
decoration: Decoration,
params: Vec<u32>,
},
MemberDecorate {
target_id: u32,
member: u32,
decoration: Decoration,
params: Vec<u32>,
},
Label { result_id: u32 },
Branch { result_id: u32 },
Kill,
@ -130,84 +196,158 @@ fn parse_instruction(i: &[u32]) -> Result<(Instruction, &[u32]), ParseError> {
return Err(ParseError::IncompleteInstruction);
}
let opcode = try!(decode_instruction(opcode, &i[1 .. word_count]));
Ok((opcode, &i[word_count..]))
let opcode = decode_instruction(opcode, &i[1 .. word_count])?;
Ok((opcode, &i[word_count ..]))
}
fn decode_instruction(opcode: u16, operands: &[u32]) -> Result<Instruction, ParseError> {
Ok(match opcode {
0 => Instruction::Nop,
5 => Instruction::Name { target_id: operands[0], name: parse_string(&operands[1..]).0 },
6 => Instruction::MemberName { target_id: operands[0], member: operands[1], name: parse_string(&operands[2..]).0 },
11 => Instruction::ExtInstImport {
result_id: operands[0],
name: parse_string(&operands[1..]).0
},
14 => Instruction::MemoryModel(try!(AddressingModel::from_num(operands[0])), try!(MemoryModel::from_num(operands[1]))),
15 => {
let (n, r) = parse_string(&operands[2..]);
Instruction::EntryPoint {
execution: try!(ExecutionModel::from_num(operands[0])),
id: operands[1],
name: n,
interface: r.to_owned(),
}
},
17 => Instruction::Capability(try!(Capability::from_num(operands[0]))),
19 => Instruction::TypeVoid { result_id: operands[0] },
20 => Instruction::TypeBool { result_id: operands[0] },
21 => Instruction::TypeInt { result_id: operands[0], width: operands[1], signedness: operands[2] != 0 },
22 => Instruction::TypeFloat { result_id: operands[0], width: operands[1] },
23 => Instruction::TypeVector { result_id: operands[0], component_id: operands[1], count: operands[2] },
24 => Instruction::TypeMatrix { result_id: operands[0], column_type_id: operands[1], column_count: operands[2] },
25 => Instruction::TypeImage {
result_id: operands[0],
sampled_type_id: operands[1],
dim: try!(Dim::from_num(operands[2])),
depth: match operands[3] { 0 => Some(false), 1 => Some(true), 2 => None, _ => unreachable!() },
arrayed: operands[4] != 0,
ms: operands[5] != 0,
sampled: match operands[6] { 0 => None, 1 => Some(true), 2 => Some(false), _ => unreachable!() },
format: try!(ImageFormat::from_num(operands[7])),
access: if operands.len() >= 9 { Some(try!(AccessQualifier::from_num(operands[8]))) } else { None },
},
26 => Instruction::TypeSampler { result_id: operands[0] },
27 => Instruction::TypeSampledImage { result_id: operands[0], image_type_id: operands[1] },
28 => Instruction::TypeArray { result_id: operands[0], type_id: operands[1], length_id: operands[2] },
29 => Instruction::TypeRuntimeArray { result_id: operands[0], type_id: operands[1] },
30 => Instruction::TypeStruct { result_id: operands[0], member_types: operands[1..].to_owned() },
31 => Instruction::TypeOpaque { result_id: operands[0], name: parse_string(&operands[1..]).0 },
32 => Instruction::TypePointer { result_id: operands[0], storage_class: try!(StorageClass::from_num(operands[1])), type_id: operands[2] },
43 => Instruction::Constant { result_type_id: operands[0], result_id: operands[1], data: operands[2..].to_owned() },
56 => Instruction::FunctionEnd,
59 => Instruction::Variable {
result_type_id: operands[0], result_id: operands[1],
storage_class: try!(StorageClass::from_num(operands[2])),
initializer: operands.get(3).map(|&v| v)
},
71 => Instruction::Decorate { target_id: operands[0], decoration: try!(Decoration::from_num(operands[1])), params: operands[2..].to_owned() },
72 => Instruction::MemberDecorate { target_id: operands[0], member: operands[1], decoration: try!(Decoration::from_num(operands[2])), params: operands[3..].to_owned() },
248 => Instruction::Label { result_id: operands[0] },
249 => Instruction::Branch { result_id: operands[0] },
252 => Instruction::Kill,
253 => Instruction::Return,
_ => Instruction::Unknown(opcode, operands.to_owned()),
})
0 => Instruction::Nop,
5 => Instruction::Name {
target_id: operands[0],
name: parse_string(&operands[1 ..]).0,
},
6 => Instruction::MemberName {
target_id: operands[0],
member: operands[1],
name: parse_string(&operands[2 ..]).0,
},
11 => Instruction::ExtInstImport {
result_id: operands[0],
name: parse_string(&operands[1 ..]).0,
},
14 => Instruction::MemoryModel(AddressingModel::from_num(operands[0])?,
MemoryModel::from_num(operands[1])?),
15 => {
let (n, r) = parse_string(&operands[2 ..]);
Instruction::EntryPoint {
execution: ExecutionModel::from_num(operands[0])?,
id: operands[1],
name: n,
interface: r.to_owned(),
}
},
17 => Instruction::Capability(Capability::from_num(operands[0])?),
19 => Instruction::TypeVoid { result_id: operands[0] },
20 => Instruction::TypeBool { result_id: operands[0] },
21 => Instruction::TypeInt {
result_id: operands[0],
width: operands[1],
signedness: operands[2] != 0,
},
22 => Instruction::TypeFloat {
result_id: operands[0],
width: operands[1],
},
23 => Instruction::TypeVector {
result_id: operands[0],
component_id: operands[1],
count: operands[2],
},
24 => Instruction::TypeMatrix {
result_id: operands[0],
column_type_id: operands[1],
column_count: operands[2],
},
25 => Instruction::TypeImage {
result_id: operands[0],
sampled_type_id: operands[1],
dim: Dim::from_num(operands[2])?,
depth: match operands[3] {
0 => Some(false),
1 => Some(true),
2 => None,
_ => unreachable!(),
},
arrayed: operands[4] != 0,
ms: operands[5] != 0,
sampled: match operands[6] {
0 => None,
1 => Some(true),
2 => Some(false),
_ => unreachable!(),
},
format: ImageFormat::from_num(operands[7])?,
access: if operands.len() >= 9 {
Some(AccessQualifier::from_num(operands[8])?)
} else {
None
},
},
26 => Instruction::TypeSampler { result_id: operands[0] },
27 => Instruction::TypeSampledImage {
result_id: operands[0],
image_type_id: operands[1],
},
28 => Instruction::TypeArray {
result_id: operands[0],
type_id: operands[1],
length_id: operands[2],
},
29 => Instruction::TypeRuntimeArray {
result_id: operands[0],
type_id: operands[1],
},
30 => Instruction::TypeStruct {
result_id: operands[0],
member_types: operands[1 ..].to_owned(),
},
31 => Instruction::TypeOpaque {
result_id: operands[0],
name: parse_string(&operands[1 ..]).0,
},
32 => Instruction::TypePointer {
result_id: operands[0],
storage_class: StorageClass::from_num(operands[1])?,
type_id: operands[2],
},
43 => Instruction::Constant {
result_type_id: operands[0],
result_id: operands[1],
data: operands[2 ..].to_owned(),
},
56 => Instruction::FunctionEnd,
59 => Instruction::Variable {
result_type_id: operands[0],
result_id: operands[1],
storage_class: StorageClass::from_num(operands[2])?,
initializer: operands.get(3).map(|&v| v),
},
71 => Instruction::Decorate {
target_id: operands[0],
decoration: Decoration::from_num(operands[1])?,
params: operands[2 ..].to_owned(),
},
72 => Instruction::MemberDecorate {
target_id: operands[0],
member: operands[1],
decoration: Decoration::from_num(operands[2])?,
params: operands[3 ..].to_owned(),
},
248 => Instruction::Label { result_id: operands[0] },
249 => Instruction::Branch { result_id: operands[0] },
252 => Instruction::Kill,
253 => Instruction::Return,
_ => Instruction::Unknown(opcode, operands.to_owned()),
})
}
fn parse_string(data: &[u32]) -> (String, &[u32]) {
let bytes = data.iter().flat_map(|&n| {
let b1 = (n & 0xff) as u8;
let b2 = ((n >> 8) & 0xff) as u8;
let b3 = ((n >> 16) & 0xff) as u8;
let b4 = ((n >> 24) & 0xff) as u8;
vec![b1, b2, b3, b4].into_iter()
}).take_while(|&b| b != 0).collect::<Vec<u8>>();
let bytes = data.iter()
.flat_map(|&n| {
let b1 = (n & 0xff) as u8;
let b2 = ((n >> 8) & 0xff) as u8;
let b3 = ((n >> 16) & 0xff) as u8;
let b4 = ((n >> 24) & 0xff) as u8;
vec![b1, b2, b3, b4].into_iter()
})
.take_while(|&b| b != 0)
.collect::<Vec<u8>>();
let r = 1 + bytes.len() / 4;
let s = String::from_utf8(bytes).expect("Shader content is not UTF-8");
(s, &data[r..])
(s, &data[r ..])
}
#[cfg(test)]

View File

@ -9,8 +9,8 @@
use std::mem;
use parse;
use enums;
use parse;
/// Translates all the structs that are contained in the SPIR-V document as Rust structs.
pub fn write_structs(doc: &parse::Spirv) -> String {
@ -18,12 +18,15 @@ pub fn write_structs(doc: &parse::Spirv) -> String {
for instruction in &doc.instructions {
match *instruction {
parse::Instruction::TypeStruct { result_id, ref member_types } => {
parse::Instruction::TypeStruct {
result_id,
ref member_types,
} => {
let (s, _) = write_struct(doc, result_id, member_types);
result.push_str(&s);
result.push_str("\n");
},
_ => ()
_ => (),
}
}
@ -34,7 +37,7 @@ pub fn write_structs(doc: &parse::Spirv) -> String {
struct Member {
name: String,
value: String,
offset: Option<usize>
offset: Option<usize>,
}
impl Member {
@ -56,7 +59,7 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
// The members of this struct.
let mut rust_members = Vec::with_capacity(members.len());
// Padding structs will be named `_paddingN` where `N` is determined by this variable.
let mut next_padding_num = 0;
@ -72,35 +75,42 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
// Ignore the whole struct is a member is built in, which includes
// `gl_Position` for example.
if is_builtin_member(doc, struct_id, num as u32) {
return (String::new(), None); // TODO: is this correct? shouldn't it return a correct struct but with a flag or something?
return (String::new(), None); // TODO: is this correct? shouldn't it return a correct struct but with a flag or something?
}
// Finding offset of the current member, as requested by the SPIR-V code.
let spirv_offset = doc.instructions.iter().filter_map(|i| {
match *i {
parse::Instruction::MemberDecorate { target_id, member,
decoration: enums::Decoration::DecorationOffset,
ref params } if target_id == struct_id &&
member as usize == num =>
{
return Some(params[0]);
},
_ => ()
};
let spirv_offset = doc.instructions
.iter()
.filter_map(|i| {
match *i {
parse::Instruction::MemberDecorate {
target_id,
member,
decoration: enums::Decoration::DecorationOffset,
ref params,
} if target_id == struct_id && member as usize == num => {
return Some(params[0]);
},
_ => (),
};
None
}).next();
None
})
.next();
// Some structs don't have `Offset` decorations, in the case they are used as local
// variables only. Ignoring these.
let spirv_offset = match spirv_offset {
Some(o) => o as usize,
None => return (String::new(), None) // TODO: shouldn't we return and let the caller ignore it instead?
None => return (String::new(), None), // TODO: shouldn't we return and let the caller ignore it instead?
};
// We need to add a dummy field if necessary.
{
let current_rust_offset = current_rust_offset.as_mut().expect("Found runtime-sized member in non-final position");
let current_rust_offset =
current_rust_offset
.as_mut()
.expect("Found runtime-sized member in non-final position");
// Updating current_rust_offset to take the alignment of the next field into account
*current_rust_offset = if *current_rust_offset == 0 {
@ -111,12 +121,13 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
if spirv_offset != *current_rust_offset {
let diff = spirv_offset.checked_sub(*current_rust_offset).unwrap();
let padding_num = next_padding_num; next_padding_num += 1;
let padding_num = next_padding_num;
next_padding_num += 1;
rust_members.push(Member {
name: format!("_dummy{}", padding_num),
value: format!("[u8; {}]", diff),
offset: None,
});
name: format!("_dummy{}", padding_num),
value: format!("[u8; {}]", diff),
offset: None,
});
*current_rust_offset += diff;
}
}
@ -129,81 +140,105 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
}
rust_members.push(Member {
name: member_name.to_owned(),
value: ty,
offset: Some(spirv_offset),
});
name: member_name.to_owned(),
value: ty,
offset: Some(spirv_offset),
});
}
// Try determine the total size of the struct in order to add padding at the end of the struct.
let spirv_req_total_size = doc.instructions.iter().filter_map(|i| {
match *i {
parse::Instruction::Decorate { target_id,
decoration: enums::Decoration::DecorationArrayStride,
ref params } =>
{
for inst in doc.instructions.iter() {
match *inst {
parse::Instruction::TypeArray { result_id, type_id, .. }
if result_id == target_id && type_id == struct_id =>
{
return Some(params[0]);
},
parse::Instruction::TypeRuntimeArray { result_id, type_id }
if result_id == target_id && type_id == struct_id =>
{
return Some(params[0]);
},
_ => ()
}
}
let spirv_req_total_size = doc.instructions
.iter()
.filter_map(|i| match *i {
parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationArrayStride,
ref params,
} => {
for inst in doc.instructions.iter() {
match *inst {
parse::Instruction::TypeArray {
result_id, type_id, ..
} if result_id == target_id && type_id == struct_id => {
return Some(params[0]);
},
parse::Instruction::TypeRuntimeArray { result_id, type_id }
if result_id == target_id && type_id == struct_id => {
return Some(params[0]);
},
_ => (),
}
}
None
},
_ => None
}
}).fold(None, |a, b| if let Some(a) = a { assert_eq!(a, b); Some(a) } else { Some(b) });
None
},
_ => None,
})
.fold(None, |a, b| if let Some(a) = a {
assert_eq!(a, b);
Some(a)
} else {
Some(b)
});
// Adding the final padding members.
if let (Some(cur_size), Some(req_size)) = (current_rust_offset, spirv_req_total_size) {
let diff = req_size.checked_sub(cur_size as u32).unwrap();
if diff >= 1 {
rust_members.push(Member {
name: format!("_dummy{}", next_padding_num),
value: format!("[u8; {}]", diff),
offset: None,
});
name: format!("_dummy{}", next_padding_num),
value: format!("[u8; {}]", diff),
offset: None,
});
}
}
// We can only implement Clone if there's no unsized member in the struct.
let (impl_text, derive_text) = if current_rust_offset.is_some() {
let i = format!("\nimpl Clone for {name} {{\n fn clone(&self) -> Self {{\n \
{name} {{\n{copies}\n }}\n }}\n}}\n", name = name,
copies = rust_members.iter().map(Member::copy_text).collect::<Vec<_>>().join(",\n"));
let i =
format!("\nimpl Clone for {name} {{\n fn clone(&self) -> Self {{\n {name} \
{{\n{copies}\n }}\n }}\n}}\n",
name = name,
copies = rust_members
.iter()
.map(Member::copy_text)
.collect::<Vec<_>>()
.join(",\n"));
(i, "#[derive(Copy)]")
} else {
("".to_owned(), "")
};
let s = format!("#[repr(C)]{derive_text}\npub struct {name} {{\n{members}\n}} /* total_size: {t:?} */\n{impl_text}",
name = name,
members = rust_members.iter().map(Member::declaration_text).collect::<Vec<_>>().join(",\n"),
t = spirv_req_total_size, impl_text = impl_text, derive_text = derive_text);
(s, spirv_req_total_size.map(|sz| sz as usize).or(current_rust_offset))
let s = format!("#[repr(C)]{derive_text}\npub struct {name} {{\n{members}\n}} /* total_size: \
{t:?} */\n{impl_text}",
name = name,
members = rust_members
.iter()
.map(Member::declaration_text)
.collect::<Vec<_>>()
.join(",\n"),
t = spirv_req_total_size,
impl_text = impl_text,
derive_text = derive_text);
(s,
spirv_req_total_size
.map(|sz| sz as usize)
.or(current_rust_offset))
}
/// Returns true if a `BuiltIn` decorator is applied on a struct member.
fn is_builtin_member(doc: &parse::Spirv, id: u32, member_id: u32) -> bool {
for instruction in &doc.instructions {
match *instruction {
parse::Instruction::MemberDecorate { target_id, member,
decoration: enums::Decoration::DecorationBuiltIn,
.. } if target_id == id && member == member_id =>
{
parse::Instruction::MemberDecorate {
target_id,
member,
decoration: enums::Decoration::DecorationBuiltIn,
..
} if target_id == id && member == member_id => {
return true;
},
_ => ()
_ => (),
}
}
@ -217,103 +252,182 @@ pub fn type_from_id(doc: &parse::Spirv, searched: u32) -> (String, Option<usize>
for instruction in doc.instructions.iter() {
match instruction {
&parse::Instruction::TypeBool { result_id } if result_id == searched => {
#[repr(C)] struct Foo { data: bool, after: u8 }
#[repr(C)]
struct Foo {
data: bool,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("bool".to_owned(), Some(size), mem::align_of::<Foo>())
return ("bool".to_owned(), Some(size), mem::align_of::<Foo>());
},
&parse::Instruction::TypeInt { result_id, width, signedness } if result_id == searched => {
&parse::Instruction::TypeInt {
result_id,
width,
signedness,
} if result_id == searched => {
match (width, signedness) {
(8, true) => {
#[repr(C)] struct Foo { data: i8, after: u8 }
#[repr(C)]
struct Foo {
data: i8,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i8".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i8".to_owned(), Some(size), mem::align_of::<Foo>());
},
(8, false) => {
#[repr(C)] struct Foo { data: u8, after: u8 }
#[repr(C)]
struct Foo {
data: u8,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u8".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u8".to_owned(), Some(size), mem::align_of::<Foo>());
},
(16, true) => {
#[repr(C)] struct Foo { data: i16, after: u8 }
#[repr(C)]
struct Foo {
data: i16,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i16".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i16".to_owned(), Some(size), mem::align_of::<Foo>());
},
(16, false) => {
#[repr(C)] struct Foo { data: u16, after: u8 }
#[repr(C)]
struct Foo {
data: u16,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u16".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u16".to_owned(), Some(size), mem::align_of::<Foo>());
},
(32, true) => {
#[repr(C)] struct Foo { data: i32, after: u8 }
#[repr(C)]
struct Foo {
data: i32,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i32".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i32".to_owned(), Some(size), mem::align_of::<Foo>());
},
(32, false) => {
#[repr(C)] struct Foo { data: u32, after: u8 }
#[repr(C)]
struct Foo {
data: u32,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u32".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u32".to_owned(), Some(size), mem::align_of::<Foo>());
},
(64, true) => {
#[repr(C)] struct Foo { data: i64, after: u8 }
#[repr(C)]
struct Foo {
data: i64,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i64".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i64".to_owned(), Some(size), mem::align_of::<Foo>());
},
(64, false) => {
#[repr(C)] struct Foo { data: u64, after: u8 }
#[repr(C)]
struct Foo {
data: u64,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u64".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u64".to_owned(), Some(size), mem::align_of::<Foo>());
},
_ => panic!("No Rust equivalent for an integer of width {}", width)
_ => panic!("No Rust equivalent for an integer of width {}", width),
}
},
&parse::Instruction::TypeFloat { result_id, width } if result_id == searched => {
match width {
32 => {
#[repr(C)] struct Foo { data: f32, after: u8 }
#[repr(C)]
struct Foo {
data: f32,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("f32".to_owned(), Some(size), mem::align_of::<Foo>())
return ("f32".to_owned(), Some(size), mem::align_of::<Foo>());
},
64 => {
#[repr(C)] struct Foo { data: f64, after: u8 }
#[repr(C)]
struct Foo {
data: f64,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("f64".to_owned(), Some(size), mem::align_of::<Foo>())
return ("f64".to_owned(), Some(size), mem::align_of::<Foo>());
},
_ => panic!("No Rust equivalent for a floating-point of width {}", width)
_ => panic!("No Rust equivalent for a floating-point of width {}", width),
}
},
&parse::Instruction::TypeVector { result_id, component_id, count } if result_id == searched => {
&parse::Instruction::TypeVector {
result_id,
component_id,
count,
} if result_id == searched => {
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, t_size, t_align) = type_from_id(doc, component_id);
return (format!("[{}; {}]", t, count), t_size.map(|s| s * count as usize), t_align);
},
&parse::Instruction::TypeMatrix { result_id, column_type_id, column_count } if result_id == searched => {
&parse::Instruction::TypeMatrix {
result_id,
column_type_id,
column_count,
} if result_id == searched => {
// FIXME: row-major or column-major
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, t_size, t_align) = type_from_id(doc, column_type_id);
return (format!("[{}; {}]", t, column_count), t_size.map(|s| s * column_count as usize), t_align);
return (format!("[{}; {}]", t, column_count),
t_size.map(|s| s * column_count as usize),
t_align);
},
&parse::Instruction::TypeArray { result_id, type_id, length_id } if result_id == searched => {
&parse::Instruction::TypeArray {
result_id,
type_id,
length_id,
} if result_id == searched => {
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, t_size, t_align) = type_from_id(doc, type_id);
let len = doc.instructions.iter().filter_map(|e| {
match e { &parse::Instruction::Constant { result_id, ref data, .. } if result_id == length_id => Some(data.clone()), _ => None }
}).next().expect("failed to find array length");
let len = doc.instructions
.iter()
.filter_map(|e| match e {
&parse::Instruction::Constant {
result_id,
ref data,
..
} if result_id == length_id => Some(data.clone()),
_ => None,
})
.next()
.expect("failed to find array length");
let len = len.iter().rev().fold(0u64, |a, &b| (a << 32) | b as u64);
return (format!("[{}; {}]", t, len), t_size.map(|s| s * len as usize), t_align); // FIXME:
return (format!("[{}; {}]", t, len), t_size.map(|s| s * len as usize), t_align); // FIXME:
},
&parse::Instruction::TypeRuntimeArray { result_id, type_id } if result_id == searched => {
&parse::Instruction::TypeRuntimeArray { result_id, type_id }
if result_id == searched => {
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, _, t_align) = type_from_id(doc, type_id);
return (format!("[{}]", t), None, t_align);
},
&parse::Instruction::TypeStruct { result_id, ref member_types } if result_id == searched => {
&parse::Instruction::TypeStruct {
result_id,
ref member_types,
} if result_id == searched => {
// TODO: take the Offset member decorate into account?
let name = ::name_from_id(doc, result_id);
let (_, size) = write_struct(doc, result_id, member_types);
let align = member_types.iter().map(|&t| type_from_id(doc, t).2).max().unwrap_or(1);
let align = member_types
.iter()
.map(|&t| type_from_id(doc, t).2)
.max()
.unwrap_or(1);
return (name, size, align);
},
_ => ()
_ => (),
}
}

View File

@ -22,13 +22,13 @@ use winit::{EventsLoop, WindowBuilder};
use winit::CreationError as WindowCreationError;
#[cfg(target_os = "macos")]
use objc::runtime::{YES};
use cocoa::appkit::{NSView, NSWindow};
#[cfg(target_os = "macos")]
use cocoa::base::id as cocoa_id;
#[cfg(target_os = "macos")]
use cocoa::appkit::{NSWindow, NSView};
#[cfg(target_os = "macos")]
use metal::*;
#[cfg(target_os = "macos")]
use objc::runtime::YES;
#[cfg(target_os = "macos")]
use std::mem;
@ -49,23 +49,25 @@ pub fn required_extensions() -> InstanceExtensions {
match InstanceExtensions::supported_by_core() {
Ok(supported) => supported.intersection(&ideal),
Err(_) => InstanceExtensions::none()
Err(_) => InstanceExtensions::none(),
}
}
pub trait VkSurfaceBuild {
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>) -> Result<Window, CreationError>;
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>)
-> Result<Window, CreationError>;
}
impl VkSurfaceBuild for WindowBuilder {
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>) -> Result<Window, CreationError> {
let window = try!(self.build(events_loop));
let surface = try!(unsafe { winit_to_surface(instance, &window) });
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>)
-> Result<Window, CreationError> {
let window = self.build(events_loop)?;
let surface = unsafe { winit_to_surface(instance, &window) }?;
Ok(Window {
window: window,
surface: surface,
})
window: window,
surface: surface,
})
}
}
@ -135,16 +137,14 @@ impl From<WindowCreationError> for CreationError {
}
#[cfg(target_os = "android")]
unsafe fn winit_to_surface(instance: Arc<Instance>,
win: &winit::Window)
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::android::WindowExt;
Surface::from_anativewindow(instance, win.get_native_window())
}
#[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
unsafe fn winit_to_surface(instance: Arc<Instance>,
win: &winit::Window)
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::unix::WindowExt;
match (win.get_wayland_display(), win.get_wayland_surface()) {
@ -158,16 +158,15 @@ unsafe fn winit_to_surface(instance: Arc<Instance>,
win.get_xlib_window().unwrap() as _)
} else {
Surface::from_xcb(instance,
win.get_xcb_connection().unwrap(),
win.get_xlib_window().unwrap() as _)
win.get_xcb_connection().unwrap(),
win.get_xlib_window().unwrap() as _)
}
}
},
}
}
#[cfg(target_os = "windows")]
unsafe fn winit_to_surface(instance: Arc<Instance>,
win: &winit::Window)
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::windows::WindowExt;
Surface::from_hwnd(instance,
@ -177,8 +176,7 @@ unsafe fn winit_to_surface(instance: Arc<Instance>,
#[cfg(target_os = "macos")]
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError>
{
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::macos::WindowExt;
unsafe {
@ -192,7 +190,7 @@ unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
let view = wnd.contentView();
view.setWantsLayer(YES);
view.setLayer(mem::transmute(layer.0)); // Bombs here with out of memory
view.setLayer(mem::transmute(layer.0)); // Bombs here with out of memory
}
Surface::from_macos_moltenvk(instance, win.get_nsview() as *const ())

View File

@ -1,22 +1,23 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::env;
fn main() {
let target = env::var("TARGET").unwrap();
if target.contains("apple-darwin") {
println!("cargo:rustc-link-search=framework={}", "/Library/Frameworks"); // TODO: necessary?
println!("cargo:rustc-link-lib=c++");
println!("cargo:rustc-link-lib=framework=MoltenVK");
println!("cargo:rustc-link-lib=framework=QuartzCore");
println!("cargo:rustc-link-lib=framework=Metal");
println!("cargo:rustc-link-lib=framework=Foundation");
}
}
use std::env;
fn main() {
let target = env::var("TARGET").unwrap();
if target.contains("apple-darwin") {
println!("cargo:rustc-link-search=framework={}",
"/Library/Frameworks"); // TODO: necessary?
println!("cargo:rustc-link-lib=c++");
println!("cargo:rustc-link-lib=framework=MoltenVK");
println!("cargo:rustc-link-lib=framework=QuartzCore");
println!("cargo:rustc-link-lib=framework=Metal");
println!("cargo:rustc-link-lib=framework=Foundation");
}
}

View File

@ -8,14 +8,15 @@
// according to those terms.
//! Buffer whose content is accessible to the CPU.
//!
//!
//! The `CpuAccessibleBuffer` is a basic general-purpose buffer. It can be used in any situation
//! but may not perform as well as other buffer types.
//!
//!
//! Each access from the CPU or from the GPU locks the whole buffer for either reading or writing.
//! You can read the buffer multiple times simultaneously. Trying to read and write simultaneously,
//! or write and write simultaneously will block.
use smallvec::SmallVec;
use std::marker::PhantomData;
use std::mem;
use std::ops::Deref;
@ -26,12 +27,11 @@ use std::sync::RwLock;
use std::sync::RwLockReadGuard;
use std::sync::RwLockWriteGuard;
use std::sync::TryLockError;
use smallvec::SmallVec;
use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
@ -46,15 +46,17 @@ use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::StdMemoryPool;
use sync::AccessError;
use sync::Sharing;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::Sharing;
use OomError;
/// Buffer whose content is accessible by the CPU.
#[derive(Debug)]
pub struct CpuAccessibleBuffer<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct CpuAccessibleBuffer<T: ?Sized, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// Inner content.
inner: UnsafeBuffer,
@ -80,21 +82,18 @@ impl<T> CpuAccessibleBuffer<T> {
-> Result<Arc<CpuAccessibleBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
}
unsafe { CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families) }
}
/// Builds a new buffer with some data in it. Only allowed for sized data.
pub fn from_data<'a, I>(device: Arc<Device>, usage: BufferUsage, queue_families: I, data: T)
-> Result<Arc<CpuAccessibleBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>,
T: Content + 'static,
T: Content + 'static
{
unsafe {
let uninitialized = try!(
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
);
let uninitialized =
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)?;
// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
@ -129,9 +128,10 @@ impl<T> CpuAccessibleBuffer<[T]> {
Q: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
let uninitialized = try!(
CpuAccessibleBuffer::uninitialized_array(device, data.len(), usage, queue_families)
);
let uninitialized = CpuAccessibleBuffer::uninitialized_array(device,
data.len(),
usage,
queue_families)?;
// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
@ -154,12 +154,10 @@ impl<T> CpuAccessibleBuffer<[T]> {
#[inline]
#[deprecated]
pub fn array<'a, I>(device: Arc<Device>, len: usize, usage: BufferUsage, queue_families: I)
-> Result<Arc<CpuAccessibleBuffer<[T]>>, OomError>
-> Result<Arc<CpuAccessibleBuffer<[T]>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuAccessibleBuffer::uninitialized_array(device, len, usage, queue_families)
}
unsafe { CpuAccessibleBuffer::uninitialized_array(device, len, usage, queue_families) }
}
/// Builds a new buffer. Can be used for arrays.
@ -180,12 +178,15 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
///
/// You must ensure that the size that you pass is correct for `T`.
///
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: I)
-> Result<Arc<CpuAccessibleBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
@ -197,33 +198,41 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
// errors can't happen
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
let mem_ty = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next().unwrap(); // Vk specs guarantee that this can't fail
let mem_ty = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next()
.unwrap(); // Vk specs guarantee that this can't fail
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;
Ok(Arc::new(CpuAccessibleBuffer {
inner: buffer,
memory: mem,
access: RwLock::new(()),
queue_families: queue_families,
marker: PhantomData,
}))
inner: buffer,
memory: mem,
access: RwLock::new(()),
queue_families: queue_families,
marker: PhantomData,
}))
}
}
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where A: MemoryPool {
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>
where A: MemoryPool
{
/// Returns the device used to create this buffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
@ -234,13 +243,22 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where A: MemoryPool {
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families.iter().map(|&num| {
self.device().physical_device().queue_family_by_id(num).unwrap()
}).collect()
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: MemoryPool {
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>
where T: Content + 'static,
A: MemoryPool
{
/// Locks the buffer in order to write its content.
///
/// If the buffer is currently in use by the GPU, this function will block until either the
@ -251,15 +269,15 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: Memo
/// that uses it will block until you unlock it.
#[inline]
pub fn read(&self) -> Result<ReadLock<T>, TryLockError<RwLockReadGuard<()>>> {
let lock = try!(self.access.try_read());
let lock = self.access.try_read()?;
let offset = self.memory.offset();
let range = offset .. offset + self.inner.size();
Ok(ReadLock {
inner: unsafe { self.memory.mapped_memory().unwrap().read_write(range) },
lock: lock,
})
inner: unsafe { self.memory.mapped_memory().unwrap().read_write(range) },
lock: lock,
})
}
/// Locks the buffer in order to write its content.
@ -272,20 +290,21 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: Memo
/// that uses it will block until you unlock it.
#[inline]
pub fn write(&self) -> Result<WriteLock<T>, TryLockError<RwLockWriteGuard<()>>> {
let lock = try!(self.access.try_write());
let lock = self.access.try_write()?;
let offset = self.memory.offset();
let range = offset .. offset + self.inner.size();
Ok(WriteLock {
inner: unsafe { self.memory.mapped_memory().unwrap().read_write(range) },
lock: lock,
})
inner: unsafe { self.memory.mapped_memory().unwrap().read_write(range) },
lock: lock,
})
}
}
unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
where T: 'static + Send + Sync, A: MemoryPool
where T: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn inner(&self) -> BufferInner {
@ -302,7 +321,7 @@ unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
#[inline]
fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> Result<(), AccessError> {
Ok(()) // FIXME:
Ok(()) // FIXME:
}
#[inline]
@ -317,7 +336,8 @@ unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
}
unsafe impl<T: ?Sized, A> TypedBufferAccess for CpuAccessibleBuffer<T, A>
where T: 'static + Send + Sync, A: MemoryPool
where T: 'static + Send + Sync,
A: MemoryPool
{
type Content = T;
}
@ -417,7 +437,7 @@ impl<'a, T: ?Sized + 'a> DerefMut for WriteLock<'a, T> {
#[cfg(test)]
mod tests {
use buffer::{CpuAccessibleBuffer, BufferUsage};
use buffer::{BufferUsage, CpuAccessibleBuffer};
#[test]
fn create_empty_buffer() {
@ -425,6 +445,9 @@ mod tests {
const EMPTY: [i32; 0] = [];
let _ = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), Some(queue.family()), EMPTY.iter());
let _ = CpuAccessibleBuffer::from_data(device,
BufferUsage::all(),
Some(queue.family()),
EMPTY.iter());
}
}

View File

@ -7,21 +7,21 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;
use smallvec::SmallVec;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
@ -56,7 +56,9 @@ use OomError;
/// The `CpuBufferPool` struct internally contains an `Arc`. You can clone the `CpuBufferPool` for
/// a cheap cost, and all the clones will share the same underlying buffer.
///
pub struct CpuBufferPool<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct CpuBufferPool<T: ?Sized, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// The device of the pool.
device: Arc<Device>,
@ -80,7 +82,9 @@ pub struct CpuBufferPool<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool
}
// One buffer of the pool.
struct ActualBuffer<A> where A: MemoryPool {
struct ActualBuffer<A>
where A: MemoryPool
{
// Inner content.
inner: UnsafeBuffer,
@ -111,7 +115,9 @@ struct ActualBufferSubbuffer {
/// A subbuffer allocated from a `CpuBufferPool`.
///
/// When this object is destroyed, the subbuffer is automatically reclaimed by the pool.
pub struct CpuBufferPoolSubbuffer<T: ?Sized, A> where A: MemoryPool {
pub struct CpuBufferPoolSubbuffer<T: ?Sized, A>
where A: MemoryPool
{
buffer: Arc<ActualBuffer<A>>,
// Index of the subbuffer within `buffer`.
@ -130,9 +136,7 @@ impl<T> CpuBufferPool<T> {
-> CpuBufferPool<T>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuBufferPool::raw(device, mem::size_of::<T>(), usage, queue_families)
}
unsafe { CpuBufferPool::raw(device, mem::size_of::<T>(), usage, queue_families) }
}
/// Builds a `CpuBufferPool` meant for simple uploads.
@ -148,22 +152,23 @@ impl<T> CpuBufferPool<T> {
impl<T> CpuBufferPool<[T]> {
#[inline]
pub fn array<'a, I>(device: Arc<Device>, len: usize, usage: BufferUsage, queue_families: I)
-> CpuBufferPool<[T]>
-> CpuBufferPool<[T]>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuBufferPool::raw(device, mem::size_of::<T>() * len, usage, queue_families)
}
unsafe { CpuBufferPool::raw(device, mem::size_of::<T>() * len, usage, queue_families) }
}
}
impl<T: ?Sized> CpuBufferPool<T> {
pub unsafe fn raw<'a, I>(device: Arc<Device>, one_size: usize,
usage: BufferUsage, queue_families: I) -> CpuBufferPool<T>
pub unsafe fn raw<'a, I>(device: Arc<Device>, one_size: usize, usage: BufferUsage,
queue_families: I)
-> CpuBufferPool<T>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let pool = Device::standard_pool(&device);
@ -187,7 +192,9 @@ impl<T: ?Sized> CpuBufferPool<T> {
}
}
impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
impl<T, A> CpuBufferPool<T, A>
where A: MemoryPool
{
/// Sets the capacity to `capacity`, or does nothing if the capacity is already higher.
///
/// Since this can involve a memory allocation, an `OomError` can happen.
@ -197,9 +204,9 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
// Check current capacity.
match *cur_buf {
Some(ref buf) if buf.capacity >= capacity => {
return Ok(())
return Ok(());
},
_ => ()
_ => (),
};
self.reset_buf(&mut cur_buf, capacity)
@ -225,11 +232,11 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
None => 3,
};
self.reset_buf(&mut mutex, next_capacity).unwrap(); /* FIXME: error */
self.reset_buf(&mut mutex, next_capacity).unwrap(); /* FIXME: error */
match self.try_next_impl(&mut mutex, data) {
Ok(n) => n,
Err(_) => unreachable!()
Err(_) => unreachable!(),
}
}
@ -246,7 +253,9 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
}
// Creates a new buffer and sets it as current.
fn reset_buf(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>, capacity: usize) -> Result<(), OomError> {
fn reset_buf(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>,
capacity: usize)
-> Result<(), OomError> {
unsafe {
let (buffer, mem_reqs) = {
let sharing = if self.queue_families.len() >= 2 {
@ -260,41 +269,52 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
None => return Err(OomError::OutOfDeviceMemory),
};
match UnsafeBuffer::new(self.device.clone(), total_size, self.usage, sharing, SparseLevel::none()) {
match UnsafeBuffer::new(self.device.clone(),
total_size,
self.usage,
sharing,
SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
// errors can't happen
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
let mem_ty = self.device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next().unwrap(); // Vk specs guarantee that this can't fail
let mem_ty = self.device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next()
.unwrap(); // Vk specs guarantee that this can't fail
let mem = try!(MemoryPool::alloc(&self.pool, mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&self.pool,
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;
**cur_buf_mutex = Some(Arc::new(ActualBuffer {
inner: buffer,
memory: mem,
subbuffers: {
let mut v = Vec::with_capacity(capacity);
for _ in 0 .. capacity {
v.push(ActualBufferSubbuffer {
num_cpu_accesses: AtomicUsize::new(0),
num_gpu_accesses: AtomicUsize::new(0),
});
}
v
},
capacity: capacity,
next_subbuffer: AtomicUsize::new(0),
}));
**cur_buf_mutex =
Some(Arc::new(ActualBuffer {
inner: buffer,
memory: mem,
subbuffers: {
let mut v = Vec::with_capacity(capacity);
for _ in 0 .. capacity {
v.push(ActualBufferSubbuffer {
num_cpu_accesses: AtomicUsize::new(0),
num_gpu_accesses: AtomicUsize::new(0),
});
}
v
},
capacity: capacity,
next_subbuffer: AtomicUsize::new(0),
}));
Ok(())
}
@ -302,12 +322,11 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
// Tries to lock a subbuffer from the current buffer.
fn try_next_impl(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>, data: T)
-> Result<CpuBufferPoolSubbuffer<T, A>, T>
{
-> Result<CpuBufferPoolSubbuffer<T, A>, T> {
// Grab the current buffer. Return `Err` if the pool wasn't "initialized" yet.
let current_buffer = match cur_buf_mutex.clone() {
Some(b) => b,
None => return Err(data)
None => return Err(data),
};
// Grab the next subbuffer to use.
@ -315,38 +334,51 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
// Since the only place that touches `next_subbuffer` is this code, and since we own a
// mutex lock to the buffer, it means that `next_subbuffer` can't be accessed
// concurrently.
let val = current_buffer.next_subbuffer.fetch_add(1, Ordering::Relaxed);
let val = current_buffer
.next_subbuffer
.fetch_add(1, Ordering::Relaxed);
// TODO: handle overflows?
// TODO: rewrite this in a proper way by holding an intermediary struct in the mutex instead of the Arc directly
val % current_buffer.capacity
};
// Check if subbuffer is already taken. If so, the pool is full.
if current_buffer.subbuffers[next_subbuffer].num_cpu_accesses.compare_and_swap(0, 1, Ordering::SeqCst) != 0 {
if current_buffer.subbuffers[next_subbuffer]
.num_cpu_accesses
.compare_and_swap(0, 1, Ordering::SeqCst) != 0
{
return Err(data);
}
// Reset num_gpu_accesses.
current_buffer.subbuffers[next_subbuffer].num_gpu_accesses.store(0, Ordering::SeqCst);
current_buffer.subbuffers[next_subbuffer]
.num_gpu_accesses
.store(0, Ordering::SeqCst);
// Write `data` in the memory.
unsafe {
let range = (next_subbuffer * self.one_size) .. ((next_subbuffer + 1) * self.one_size);
let mut mapping = current_buffer.memory.mapped_memory().unwrap().read_write(range);
let mut mapping = current_buffer
.memory
.mapped_memory()
.unwrap()
.read_write(range);
*mapping = data;
}
Ok(CpuBufferPoolSubbuffer {
buffer: current_buffer,
subbuffer_index: next_subbuffer,
size: self.one_size,
marker: PhantomData,
})
buffer: current_buffer,
subbuffer_index: next_subbuffer,
size: self.one_size,
marker: PhantomData,
})
}
}
// Can't automatically derive `Clone`, otherwise the compiler adds a `T: Clone` requirement.
impl<T: ?Sized, A> Clone for CpuBufferPool<T, A> where A: MemoryPool + Clone {
impl<T: ?Sized, A> Clone for CpuBufferPool<T, A>
where A: MemoryPool + Clone
{
fn clone(&self) -> Self {
let buf = self.current_buffer.lock().unwrap();
@ -371,9 +403,13 @@ unsafe impl<T: ?Sized, A> DeviceOwned for CpuBufferPool<T, A>
}
}
impl<T: ?Sized, A> Clone for CpuBufferPoolSubbuffer<T, A> where A: MemoryPool {
impl<T: ?Sized, A> Clone for CpuBufferPoolSubbuffer<T, A>
where A: MemoryPool
{
fn clone(&self) -> CpuBufferPoolSubbuffer<T, A> {
let old_val = self.buffer.subbuffers[self.subbuffer_index].num_cpu_accesses.fetch_add(1, Ordering::SeqCst);
let old_val = self.buffer.subbuffers[self.subbuffer_index]
.num_cpu_accesses
.fetch_add(1, Ordering::SeqCst);
debug_assert!(old_val >= 1);
CpuBufferPoolSubbuffer {
@ -458,8 +494,8 @@ unsafe impl<T: ?Sized, A> DeviceOwned for CpuBufferPoolSubbuffer<T, A>
#[cfg(test)]
mod tests {
use std::mem;
use buffer::CpuBufferPool;
use std::mem;
#[test]
fn basic_create() {

View File

@ -13,16 +13,16 @@
//! You can read the buffer multiple times simultaneously from multiple queues. Trying to read and
//! write simultaneously, or write and write simultaneously will block with a semaphore.
use smallvec::SmallVec;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use smallvec::SmallVec;
use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
@ -42,7 +42,9 @@ use SafeDeref;
/// Buffer whose content is accessible by the CPU.
#[derive(Debug)]
pub struct DeviceLocalBuffer<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct DeviceLocalBuffer<T: ?Sized, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// Inner content.
inner: UnsafeBuffer,
@ -66,9 +68,7 @@ impl<T> DeviceLocalBuffer<T> {
-> Result<Arc<DeviceLocalBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
DeviceLocalBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
}
unsafe { DeviceLocalBuffer::raw(device, mem::size_of::<T>(), usage, queue_families) }
}
}
@ -76,12 +76,10 @@ impl<T> DeviceLocalBuffer<[T]> {
/// Builds a new buffer. Can be used for arrays.
#[inline]
pub fn array<'a, I>(device: Arc<Device>, len: usize, usage: BufferUsage, queue_families: I)
-> Result<Arc<DeviceLocalBuffer<[T]>>, OomError>
-> Result<Arc<DeviceLocalBuffer<[T]>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
DeviceLocalBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families)
}
unsafe { DeviceLocalBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families) }
}
}
@ -92,12 +90,15 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
///
/// You must ensure that the size that you pass is correct for `T`.
///
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: I)
-> Result<Arc<DeviceLocalBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
@ -109,36 +110,45 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
// errors can't happen
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
let mem_ty = {
let device_local = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;
Ok(Arc::new(DeviceLocalBuffer {
inner: buffer,
memory: mem,
queue_families: queue_families,
gpu_lock: AtomicUsize::new(0),
marker: PhantomData,
}))
inner: buffer,
memory: mem,
queue_families: queue_families,
gpu_lock: AtomicUsize::new(0),
marker: PhantomData,
}))
}
}
impl<T: ?Sized, A> DeviceLocalBuffer<T, A> where A: MemoryPool {
impl<T: ?Sized, A> DeviceLocalBuffer<T, A>
where A: MemoryPool
{
/// Returns the device used to create this buffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
@ -149,9 +159,15 @@ impl<T: ?Sized, A> DeviceLocalBuffer<T, A> where A: MemoryPool {
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families.iter().map(|&num| {
self.device().physical_device().queue_family_by_id(num).unwrap()
}).collect()
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}

View File

@ -8,34 +8,34 @@
// according to those terms.
//! Buffer that is written once then read for as long as it is alive.
//!
//!
//! Use this buffer when you have data that you never modify.
//!
//! Only the first ever command buffer that uses this buffer can write to it (for example by
//! copying from another buffer). Any subsequent command buffer **must** only read from the buffer,
//! or a panic will happen.
//!
//!
//! The buffer will be stored in device-local memory if possible
//!
use smallvec::SmallVec;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use smallvec::SmallVec;
use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::AutoCommandBuffer;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferExecFuture;
use device::Device;
@ -86,13 +86,16 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
pub fn from_data<'a, I>(data: T, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
pub fn from_data<'a, I>(
data: T, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>,
T: 'static + Send + Sync + Sized,
T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_data(queue.device().clone(), BufferUsage::transfer_source(),
iter::once(queue.family()), data)?;
let source = CpuAccessibleBuffer::from_data(queue.device().clone(),
BufferUsage::transfer_source(),
iter::once(queue.family()),
data)?;
ImmutableBuffer::from_buffer(source, usage, queue_families, queue)
}
@ -102,21 +105,24 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
pub fn from_buffer<'a, B, I>(source: B, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
pub fn from_buffer<'a, B, I>(
source: B, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
where B: BufferAccess + TypedBufferAccess<Content = T> + 'static + Clone + Send + Sync,
I: IntoIterator<Item = QueueFamily<'a>>,
T: 'static + Send + Sync,
T: 'static + Send + Sync
{
unsafe {
// We automatically set `transfer_dest` to true in order to avoid annoying errors.
let actual_usage = BufferUsage {
transfer_dest: true,
.. usage
..usage
};
let (buffer, init) = ImmutableBuffer::raw(source.device().clone(), source.size(),
actual_usage, queue_families)?;
let (buffer, init) = ImmutableBuffer::raw(source.device().clone(),
source.size(),
actual_usage,
queue_families)?;
let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?
.copy_buffer(source, init).unwrap() // TODO: return error?
@ -124,7 +130,7 @@ impl<T: ?Sized> ImmutableBuffer<T> {
let future = match cb.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!()
Err(_) => unreachable!(),
};
Ok((buffer, future))
@ -150,8 +156,9 @@ impl<T> ImmutableBuffer<T> {
/// data, otherwise the content is undefined.
///
#[inline]
pub unsafe fn uninitialized<'a, I>(device: Arc<Device>, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
pub unsafe fn uninitialized<'a, I>(
device: Arc<Device>, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
ImmutableBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
@ -159,14 +166,17 @@ impl<T> ImmutableBuffer<T> {
}
impl<T> ImmutableBuffer<[T]> {
pub fn from_iter<'a, D, I>(data: D, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), OomError>
pub fn from_iter<'a, D, I>(
data: D, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>,
D: ExactSizeIterator<Item = T>,
T: 'static + Send + Sync + Sized,
T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(), BufferUsage::transfer_source(),
iter::once(queue.family()), data)?;
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(),
BufferUsage::transfer_source(),
iter::once(queue.family()),
data)?;
ImmutableBuffer::from_buffer(source, usage, queue_families, queue)
}
@ -187,9 +197,9 @@ impl<T> ImmutableBuffer<[T]> {
/// data, otherwise the content is undefined.
///
#[inline]
pub unsafe fn uninitialized_array<'a, I>(device: Arc<Device>, len: usize, usage: BufferUsage,
queue_families: I)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferInitialization<[T]>), OomError>
pub unsafe fn uninitialized_array<'a, I>(
device: Arc<Device>, len: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferInitialization<[T]>), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
ImmutableBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families)
@ -213,8 +223,9 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// data.
///
#[inline]
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
pub unsafe fn raw<'a, I>(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
@ -223,10 +234,9 @@ impl<T: ?Sized> ImmutableBuffer<T> {
// Internal implementation of `raw`. This is separated from `raw` so that it doesn't need to be
// inlined.
unsafe fn raw_impl(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: SmallVec<[u32; 4]>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
{
unsafe fn raw_impl(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: SmallVec<[u32; 4]>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError> {
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
@ -237,32 +247,39 @@ impl<T: ?Sized> ImmutableBuffer<T> {
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
// errors can't happen
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
let mem_ty = {
let device_local = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;
let final_buf = Arc::new(ImmutableBuffer {
inner: buffer,
memory: mem,
queue_families: queue_families,
initialized: AtomicBool::new(false),
marker: PhantomData,
});
inner: buffer,
memory: mem,
queue_families: queue_families,
initialized: AtomicBool::new(false),
marker: PhantomData,
});
let initialization = ImmutableBufferInitialization {
buffer: final_buf.clone(),
@ -284,9 +301,15 @@ impl<T: ?Sized, A> ImmutableBuffer<T, A> {
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families.iter().map(|&num| {
self.device().physical_device().queue_family_by_id(num).unwrap()
}).collect()
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}
@ -402,30 +425,40 @@ impl<T: ?Sized, A> Clone for ImmutableBufferInitialization<T, A> {
#[cfg(test)]
mod tests {
use std::iter;
use buffer::BufferUsage;
use buffer::cpu_access::CpuAccessibleBuffer;
use buffer::immutable::ImmutableBuffer;
use buffer::BufferUsage;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::CommandBuffer;
use std::iter;
use sync::GpuFuture;
#[test]
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(),
let (buffer, _) = ImmutableBuffer::from_data(12u32,
BufferUsage::all(),
iter::once(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();
let dest = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let dest = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(buffer, dest.clone()).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, dest.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let dest_content = dest.read().unwrap();
assert_eq!(*dest_content, 12);
@ -435,19 +468,28 @@ mod tests {
fn from_iter_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_iter((0 .. 512u32).map(|n| n * 2), BufferUsage::all(),
let (buffer, _) = ImmutableBuffer::from_iter((0 .. 512u32).map(|n| n * 2),
BufferUsage::all(),
iter::once(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();
let dest = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(),
let dest = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
(0 .. 512).map(|_| 0u32)).unwrap();
(0 .. 512).map(|_| 0u32))
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(buffer, dest.clone()).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, dest.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let dest_content = dest.read().unwrap();
for (n, &v) in dest_content.iter().enumerate() {
@ -456,39 +498,56 @@ mod tests {
}
#[test]
#[should_panic] // TODO: check Result error instead of panicking
#[should_panic] // TODO: check Result error instead of panicking
fn writing_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(),
let (buffer, _) = ImmutableBuffer::from_data(12u32,
BufferUsage::all(),
iter::once(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.fill_buffer(buffer, 50).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.fill_buffer(buffer, 50)
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
#[should_panic] // TODO: check Result error instead of panicking
#[should_panic] // TODO: check Result error instead of panicking
fn read_uninitialized_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all(),
iter::once(queue.family())).unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all(),
iter::once(queue.family()))
.unwrap()
};
let src = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let src = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(src, buffer).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(src, buffer)
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
@ -496,52 +555,78 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all(),
iter::once(queue.family())).unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all(),
iter::once(queue.family()))
.unwrap()
};
let src = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let src = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(src.clone(), init).unwrap()
.copy_buffer(buffer, src.clone()).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(src.clone(), init)
.unwrap()
.copy_buffer(buffer, src.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
#[ignore] // TODO: doesn't work because the submit sync layer isn't properly implemented
#[ignore] // TODO: doesn't work because the submit sync layer isn't properly implemented
fn init_then_read_same_future() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all(),
iter::once(queue.family())).unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all(),
iter::once(queue.family()))
.unwrap()
};
let src = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let src = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();
let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(src.clone(), init).unwrap()
.build().unwrap();
let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(src.clone(), init)
.unwrap()
.build()
.unwrap();
let cb2 = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(buffer, src.clone()).unwrap()
.build().unwrap();
let cb2 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, src.clone())
.unwrap()
.build()
.unwrap();
let _ = cb1.execute(queue.clone()).unwrap()
.then_execute(queue.clone(), cb2).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = cb1.execute(queue.clone())
.unwrap()
.then_execute(queue.clone(), cb2)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();
let _ = ImmutableBuffer::from_data((), BufferUsage::all(), Some(queue.family()), queue.clone());
let _ =
ImmutableBuffer::from_data((), BufferUsage::all(), Some(queue.family()), queue.clone());
}
// TODO: write tons of tests that try to exploit loopholes

View File

@ -1,258 +1,266 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::marker::PhantomData;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use std::marker::PhantomData;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use sync::AccessError;
/// A subpart of a buffer.
///
/// This object doesn't correspond to any Vulkan object. It exists for API convenience.
///
/// # Example
///
/// Creating a slice:
///
/// ```ignore // FIXME: unignore
/// use vulkano::buffer::BufferSlice;
/// # let buffer: std::sync::Arc<vulkano::buffer::DeviceLocalBuffer<[u8]>> = return;
/// let _slice = BufferSlice::from(&buffer);
/// ```
///
/// Selecting a slice of a buffer that contains `[T]`:
///
/// ```ignore // FIXME: unignore
/// use vulkano::buffer::BufferSlice;
/// # let buffer: std::sync::Arc<vulkano::buffer::DeviceLocalBuffer<[u8]>> = return;
/// let _slice = BufferSlice::from(&buffer).slice(12 .. 14).unwrap();
/// ```
///
pub struct BufferSlice<T: ?Sized, B> {
marker: PhantomData<T>,
resource: B,
offset: usize,
size: usize,
}
// We need to implement `Clone` manually, otherwise the derive adds a `T: Clone` requirement.
impl<T: ?Sized, B> Clone for BufferSlice<T, B>
where B: Clone
{
#[inline]
fn clone(&self) -> Self {
BufferSlice {
marker: PhantomData,
resource: self.resource.clone(),
offset: self.offset,
size: self.size,
}
}
}
impl<T: ?Sized, B> BufferSlice<T, B> {
#[inline]
pub fn from_typed_buffer_access(r: B) -> BufferSlice<T, B>
where B: TypedBufferAccess<Content = T>
{
let size = r.size();
BufferSlice {
marker: PhantomData,
resource: r,
offset: 0,
size: size,
}
}
/// Returns the buffer that this slice belongs to.
pub fn buffer(&self) -> &B {
&self.resource
}
/// Returns the offset of that slice within the buffer.
#[inline]
pub fn offset(&self) -> usize {
self.offset
}
/// Returns the size of that slice in bytes.
#[inline]
pub fn size(&self) -> usize {
self.size
}
/// Builds a slice that contains an element from inside the buffer.
///
/// This method builds an object that represents a slice of the buffer. No actual operation
/// is performed.
///
/// # Example
///
/// TODO
///
/// # Safety
///
/// The object whose reference is passed to the closure is uninitialized. Therefore you
/// **must not** access the content of the object.
///
/// You **must** return a reference to an element from the parameter. The closure **must not**
/// panic.
#[inline]
pub unsafe fn slice_custom<F, R: ?Sized>(self, f: F) -> BufferSlice<R, B>
where F: for<'r> FnOnce(&'r T) -> &'r R
// TODO: bounds on R
{
let data: &T = mem::zeroed();
let result = f(data);
let size = mem::size_of_val(result);
let result = result as *const R as *const () as usize;
assert!(result <= self.size());
assert!(result + size <= self.size());
BufferSlice {
marker: PhantomData,
resource: self.resource,
offset: self.offset + result,
size: size,
}
}
}
impl<T, B> BufferSlice<[T], B> {
/// Returns the number of elements in this slice.
#[inline]
pub fn len(&self) -> usize {
debug_assert_eq!(self.size() % mem::size_of::<T>(), 0);
self.size() / mem::size_of::<T>()
}
/// Reduces the slice to just one element of the array.
///
/// Returns `None` if out of range.
#[inline]
pub fn index(self, index: usize) -> Option<BufferSlice<T, B>> {
if index >= self.len() { return None; }
Some(BufferSlice {
marker: PhantomData,
resource: self.resource,
offset: self.offset + index * mem::size_of::<T>(),
size: mem::size_of::<T>(),
})
}
/// Reduces the slice to just a range of the array.
///
/// Returns `None` if out of range.
#[inline]
pub fn slice(self, range: Range<usize>) -> Option<BufferSlice<[T], B>> {
if range.end > self.len() { return None; }
Some(BufferSlice {
marker: PhantomData,
resource: self.resource,
offset: self.offset + range.start * mem::size_of::<T>(),
size: (range.end - range.start) * mem::size_of::<T>(),
})
}
}
unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B> where B: BufferAccess {
#[inline]
fn inner(&self) -> BufferInner {
let inner = self.resource.inner();
BufferInner {
buffer: inner.buffer,
offset: inner.offset + self.offset,
}
}
#[inline]
fn size(&self) -> usize {
self.size
}
#[inline]
fn conflicts_buffer(&self, self_offset: usize, self_size: usize,
other: &BufferAccess, other_offset: usize, other_size: usize) -> bool
{
let self_offset = self.offset + self_offset;
// FIXME: spurious failures ; needs investigation
//debug_assert!(self_size + self_offset <= self.size);
self.resource.conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
}
#[inline]
fn conflict_key(&self, self_offset: usize, self_size: usize) -> u64 {
let self_offset = self.offset + self_offset;
// FIXME: spurious failures ; needs investigation
//debug_assert!(self_size + self_offset <= self.size);
self.resource.conflict_key(self_offset, self_size)
}
#[inline]
fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> Result<(), AccessError> {
self.resource.try_gpu_lock(exclusive_access, queue)
}
#[inline]
unsafe fn increase_gpu_lock(&self) {
self.resource.increase_gpu_lock()
}
#[inline]
unsafe fn unlock(&self) {
self.resource.unlock()
}
}
unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B> where B: BufferAccess, {
type Content = T;
}
unsafe impl<T: ?Sized, B> DeviceOwned for BufferSlice<T, B>
where B: DeviceOwned
{
#[inline]
fn device(&self) -> &Arc<Device> {
self.resource.device()
}
}
impl<T, B> From<BufferSlice<T, B>> for BufferSlice<[T], B> {
#[inline]
fn from(r: BufferSlice<T, B>) -> BufferSlice<[T], B> {
BufferSlice {
marker: PhantomData,
resource: r.resource,
offset: r.offset,
size: r.size,
}
}
}
/// Takes a `BufferSlice` that points to a struct, and returns a `BufferSlice` that points to
/// a specific field of that struct.
#[macro_export]
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use sync::AccessError;
/// A subpart of a buffer.
///
/// This object doesn't correspond to any Vulkan object. It exists for API convenience.
///
/// # Example
///
/// Creating a slice:
///
/// ```ignore // FIXME: unignore
/// use vulkano::buffer::BufferSlice;
/// # let buffer: std::sync::Arc<vulkano::buffer::DeviceLocalBuffer<[u8]>> = return;
/// let _slice = BufferSlice::from(&buffer);
/// ```
///
/// Selecting a slice of a buffer that contains `[T]`:
///
/// ```ignore // FIXME: unignore
/// use vulkano::buffer::BufferSlice;
/// # let buffer: std::sync::Arc<vulkano::buffer::DeviceLocalBuffer<[u8]>> = return;
/// let _slice = BufferSlice::from(&buffer).slice(12 .. 14).unwrap();
/// ```
///
pub struct BufferSlice<T: ?Sized, B> {
    // Ties the `T` type parameter to the struct without actually storing a `T`.
    marker: PhantomData<T>,
    // The underlying buffer access object this slice borrows a region of.
    resource: B,
    // Byte offset of the slice within `resource`.
    offset: usize,
    // Size of the slice in bytes.
    size: usize,
}
// We need to implement `Clone` manually, otherwise the derive adds a `T: Clone` requirement.
impl<T: ?Sized, B> Clone for BufferSlice<T, B>
    where B: Clone
{
    /// Clones the underlying resource and re-wraps the same byte region.
    #[inline]
    fn clone(&self) -> Self {
        let resource = self.resource.clone();
        BufferSlice {
            resource: resource,
            offset: self.offset,
            size: self.size,
            marker: PhantomData,
        }
    }
}
impl<T: ?Sized, B> BufferSlice<T, B> {
    /// Wraps a whole typed buffer access object into a slice that covers its entire size.
    #[inline]
    pub fn from_typed_buffer_access(r: B) -> BufferSlice<T, B>
        where B: TypedBufferAccess<Content = T>
    {
        // The slice spans the whole buffer: offset 0, size equal to the buffer's size.
        let size = r.size();
        BufferSlice {
            marker: PhantomData,
            resource: r,
            offset: 0,
            size: size,
        }
    }
    /// Returns the buffer that this slice belongs to.
    pub fn buffer(&self) -> &B {
        &self.resource
    }
    /// Returns the offset of that slice within the buffer.
    #[inline]
    pub fn offset(&self) -> usize {
        self.offset
    }
    /// Returns the size of that slice in bytes.
    #[inline]
    pub fn size(&self) -> usize {
        self.size
    }
    /// Builds a slice that contains an element from inside the buffer.
    ///
    /// This method builds an object that represents a slice of the buffer. No actual operation
    /// is performed.
    ///
    /// # Example
    ///
    /// TODO
    ///
    /// # Safety
    ///
    /// The object whose reference is passed to the closure is uninitialized. Therefore you
    /// **must not** access the content of the object.
    ///
    /// You **must** return a reference to an element from the parameter. The closure **must not**
    /// panic.
    #[inline]
    pub unsafe fn slice_custom<F, R: ?Sized>(self, f: F) -> BufferSlice<R, B>
        where F: for<'r> FnOnce(&'r T) -> &'r R // TODO: bounds on R
    {
        // NOTE(review): the `&T` produced from `mem::zeroed()` is never dereferenced here;
        // it exists only so the closure can project to a field whose address we measure.
        // The closure must not read through the reference either (see the Safety section).
        let data: &T = mem::zeroed();
        let result = f(data);
        let size = mem::size_of_val(result);
        // Because the base reference is all-zero bytes, the returned reference's address
        // *is* the field's byte offset within `T`.
        let result = result as *const R as *const () as usize;
        assert!(result <= self.size());
        assert!(result + size <= self.size());
        BufferSlice {
            marker: PhantomData,
            resource: self.resource,
            // Shift this slice's own offset by the measured field offset.
            offset: self.offset + result,
            size: size,
        }
    }
}
impl<T, B> BufferSlice<[T], B> {
    /// Returns the number of elements in this slice.
    #[inline]
    pub fn len(&self) -> usize {
        let elem_size = mem::size_of::<T>();
        debug_assert_eq!(self.size() % elem_size, 0);
        self.size() / elem_size
    }
    /// Reduces the slice to just one element of the array.
    ///
    /// Returns `None` if out of range.
    #[inline]
    pub fn index(self, index: usize) -> Option<BufferSlice<T, B>> {
        if index < self.len() {
            let elem_size = mem::size_of::<T>();
            Some(BufferSlice {
                     marker: PhantomData,
                     resource: self.resource,
                     offset: self.offset + index * elem_size,
                     size: elem_size,
                 })
        } else {
            None
        }
    }
    /// Reduces the slice to just a range of the array.
    ///
    /// Returns `None` if out of range.
    #[inline]
    pub fn slice(self, range: Range<usize>) -> Option<BufferSlice<[T], B>> {
        if range.end > self.len() {
            return None;
        }
        let elem_size = mem::size_of::<T>();
        Some(BufferSlice {
                 marker: PhantomData,
                 resource: self.resource,
                 offset: self.offset + range.start * elem_size,
                 size: (range.end - range.start) * elem_size,
             })
    }
}
unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B>
    where B: BufferAccess
{
    #[inline]
    fn inner(&self) -> BufferInner {
        // Report the underlying buffer, with this slice's offset added to the
        // resource's own offset.
        let inner = self.resource.inner();
        BufferInner {
            buffer: inner.buffer,
            offset: inner.offset + self.offset,
        }
    }
    #[inline]
    fn size(&self) -> usize {
        self.size
    }
    #[inline]
    fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
                        other_offset: usize, other_size: usize)
                        -> bool {
        // Translate the offset (relative to this slice) into an offset relative to the
        // underlying resource before delegating the conflict check.
        let self_offset = self.offset + self_offset;
        // FIXME: spurious failures ; needs investigation
        //debug_assert!(self_size + self_offset <= self.size);
        self.resource
            .conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
    }
    #[inline]
    fn conflict_key(&self, self_offset: usize, self_size: usize) -> u64 {
        // Same offset translation as in `conflicts_buffer` above.
        let self_offset = self.offset + self_offset;
        // FIXME: spurious failures ; needs investigation
        //debug_assert!(self_size + self_offset <= self.size);
        self.resource.conflict_key(self_offset, self_size)
    }
    #[inline]
    fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> Result<(), AccessError> {
        // Locking is delegated to the underlying resource as a whole; a slice does not
        // maintain its own lock state.
        self.resource.try_gpu_lock(exclusive_access, queue)
    }
    #[inline]
    unsafe fn increase_gpu_lock(&self) {
        self.resource.increase_gpu_lock()
    }
    #[inline]
    unsafe fn unlock(&self) {
        self.resource.unlock()
    }
}
unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B>
    where B: BufferAccess
{
    // The typed content of the slice is whatever `T` the slice was constructed with.
    type Content = T;
}
unsafe impl<T: ?Sized, B> DeviceOwned for BufferSlice<T, B>
    where B: DeviceOwned
{
    #[inline]
    fn device(&self) -> &Arc<Device> {
        // A slice belongs to the same device as the resource it borrows from.
        self.resource.device()
    }
}
impl<T, B> From<BufferSlice<T, B>> for BufferSlice<[T], B> {
    /// Reinterprets a slice over a single `T` as a slice over `[T]`, keeping the
    /// same underlying resource, offset and size.
    #[inline]
    fn from(r: BufferSlice<T, B>) -> BufferSlice<[T], B> {
        let BufferSlice { resource, offset, size, marker: _ } = r;
        BufferSlice {
            marker: PhantomData,
            resource: resource,
            offset: offset,
            size: size,
        }
    }
}
/// Takes a `BufferSlice` that points to a struct, and returns a `BufferSlice` that points to
/// a specific field of that struct.
#[macro_export]
macro_rules! buffer_slice_field {
    ($slice:expr, $field:ident) => (
        // TODO: add #[allow(unsafe_code)] when that's allowed
        // Sound because the closure only projects to a field and never reads through
        // the reference, matching the contract documented on `slice_custom`.
        unsafe { $slice.slice_custom(|s| &s.$field) }
    )
}
}

View File

@ -8,15 +8,15 @@
// according to those terms.
//! Low level implementation of buffers.
//!
//!
//! Wraps directly around Vulkan buffers, with the exceptions of a few safety checks.
//!
//!
//! The `UnsafeBuffer` type is the lowest-level buffer object provided by this library. It is used
//! internally by the higher-level buffer types. You are strongly encouraged to have excellent
//! knowledge of the Vulkan specs if you want to use an `UnsafeBuffer`.
//!
//!
//! Here is what you must take care of when you use an `UnsafeBuffer`:
//!
//!
//! - Synchronization, ie. avoid reading and writing simultaneously to the same buffer.
//! - Memory aliasing considerations. If you use the same memory to back multiple resources, you
//! must ensure that they are not used together and must enable some additional flags.
@ -24,12 +24,12 @@
//! sparse binding.
//! - Type safety.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;
use buffer::BufferUsage;
use buffer::usage::usage_to_bits;
@ -39,10 +39,10 @@ use memory::DeviceMemory;
use memory::MemoryRequirements;
use sync::Sharing;
use check_errors;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use vk;
/// Data storage in a GPU-accessible location.
@ -81,10 +81,10 @@ impl UnsafeBuffer {
let usage_bits = usage_to_bits(usage);
// Checking sparse features.
assert!(sparse.sparse || !sparse.sparse_residency, "Can't enable sparse residency without \
enabling sparse binding as well");
assert!(sparse.sparse || !sparse.sparse_aliased, "Can't enable sparse aliasing without \
enabling sparse binding as well");
assert!(sparse.sparse || !sparse.sparse_residency,
"Can't enable sparse residency without enabling sparse binding as well");
assert!(sparse.sparse || !sparse.sparse_aliased,
"Can't enable sparse aliasing without enabling sparse binding as well");
if sparse.sparse && !device.enabled_features().sparse_binding {
return Err(BufferCreationError::SparseBindingFeatureNotEnabled);
}
@ -113,13 +113,18 @@ impl UnsafeBuffer {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateBuffer(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateBuffer(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
let mem_reqs = {
#[inline] fn align(val: usize, al: usize) -> usize { al * (1 + (val - 1) / al) }
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}
let mut output: vk::MemoryRequirements = mem::uninitialized();
vk.GetBufferMemoryRequirements(device.internal_object(), buffer, &mut output);
@ -158,20 +163,19 @@ impl UnsafeBuffer {
Ok((obj, mem_reqs))
}
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize)
-> Result<(), OomError>
{
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize) -> Result<(), OomError> {
let vk = self.device.pointers();
// We check for correctness in debug mode.
debug_assert!({
let mut mem_reqs = mem::uninitialized();
vk.GetBufferMemoryRequirements(self.device.internal_object(), self.buffer,
&mut mem_reqs);
mem_reqs.size <= (memory.size() - offset) as u64 &&
(offset as u64 % mem_reqs.alignment) == 0 &&
mem_reqs.memoryTypeBits & (1 << memory.memory_type().id()) != 0
});
let mut mem_reqs = mem::uninitialized();
vk.GetBufferMemoryRequirements(self.device.internal_object(),
self.buffer,
&mut mem_reqs);
mem_reqs.size <= (memory.size() - offset) as u64 &&
(offset as u64 % mem_reqs.alignment) == 0 &&
mem_reqs.memoryTypeBits & (1 << memory.memory_type().id()) != 0
});
// Check for alignment correctness.
{
@ -187,8 +191,10 @@ impl UnsafeBuffer {
}
}
try!(check_errors(vk.BindBufferMemory(self.device.internal_object(), self.buffer,
memory.internal_object(), offset as vk::DeviceSize)));
check_errors(vk.BindBufferMemory(self.device.internal_object(),
self.buffer,
memory.internal_object(),
offset as vk::DeviceSize))?;
Ok(())
}
@ -303,9 +309,15 @@ impl SparseLevel {
#[inline]
fn to_flags(&self) -> vk::BufferCreateFlagBits {
let mut result = 0;
if self.sparse { result |= vk::BUFFER_CREATE_SPARSE_BINDING_BIT; }
if self.sparse_residency { result |= vk::BUFFER_CREATE_SPARSE_RESIDENCY_BIT; }
if self.sparse_aliased { result |= vk::BUFFER_CREATE_SPARSE_ALIASED_BIT; }
if self.sparse {
result |= vk::BUFFER_CREATE_SPARSE_BINDING_BIT;
}
if self.sparse_residency {
result |= vk::BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
}
if self.sparse_aliased {
result |= vk::BUFFER_CREATE_SPARSE_ALIASED_BIT;
}
result
}
}
@ -344,7 +356,7 @@ impl error::Error for BufferCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
BufferCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -369,7 +381,7 @@ impl From<Error> for BufferCreationError {
match err {
err @ Error::OutOfHostMemory => BufferCreationError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => BufferCreationError::OomError(OomError::from(err)),
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -379,9 +391,9 @@ mod tests {
use std::iter::Empty;
use super::BufferCreationError;
use super::BufferUsage;
use super::SparseLevel;
use super::UnsafeBuffer;
use super::BufferUsage;
use device::Device;
use device::DeviceOwned;
@ -391,7 +403,10 @@ mod tests {
fn create() {
let (device, _) = gfx_dev_and_queue!();
let (buf, reqs) = unsafe {
UnsafeBuffer::new(device.clone(), 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
UnsafeBuffer::new(device.clone(),
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
SparseLevel::none())
}.unwrap();
@ -401,23 +416,39 @@ mod tests {
}
#[test]
#[should_panic(expected = "Can't enable sparse residency without enabling sparse binding as well")]
#[should_panic(expected = "Can't enable sparse residency without enabling sparse \
binding as well")]
fn panic_wrong_sparse_residency() {
let (device, _) = gfx_dev_and_queue!();
let sparse = SparseLevel { sparse: false, sparse_residency: true, sparse_aliased: false };
let sparse = SparseLevel {
sparse: false,
sparse_residency: true,
sparse_aliased: false,
};
let _ = unsafe {
UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
UnsafeBuffer::new(device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse)
};
}
#[test]
#[should_panic(expected = "Can't enable sparse aliasing without enabling sparse binding as well")]
#[should_panic(expected = "Can't enable sparse aliasing without enabling sparse \
binding as well")]
fn panic_wrong_sparse_aliased() {
let (device, _) = gfx_dev_and_queue!();
let sparse = SparseLevel { sparse: false, sparse_residency: false, sparse_aliased: true };
let sparse = SparseLevel {
sparse: false,
sparse_residency: false,
sparse_aliased: true,
};
let _ = unsafe {
UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
UnsafeBuffer::new(device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse)
};
}
@ -425,13 +456,19 @@ mod tests {
#[test]
fn missing_feature_sparse_binding() {
let (device, _) = gfx_dev_and_queue!();
let sparse = SparseLevel { sparse: true, sparse_residency: false, sparse_aliased: false };
let sparse = SparseLevel {
sparse: true,
sparse_residency: false,
sparse_aliased: false,
};
unsafe {
match UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
sparse)
{
match UnsafeBuffer::new(device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse) {
Err(BufferCreationError::SparseBindingFeatureNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
};
}
@ -439,13 +476,19 @@ mod tests {
#[test]
fn missing_feature_sparse_residency() {
let (device, _) = gfx_dev_and_queue!(sparse_binding);
let sparse = SparseLevel { sparse: true, sparse_residency: true, sparse_aliased: false };
let sparse = SparseLevel {
sparse: true,
sparse_residency: true,
sparse_aliased: false,
};
unsafe {
match UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
sparse)
{
match UnsafeBuffer::new(device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse) {
Err(BufferCreationError::SparseResidencyBufferFeatureNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
};
}
@ -453,13 +496,19 @@ mod tests {
#[test]
fn missing_feature_sparse_aliased() {
let (device, _) = gfx_dev_and_queue!(sparse_binding);
let sparse = SparseLevel { sparse: true, sparse_residency: false, sparse_aliased: true };
let sparse = SparseLevel {
sparse: true,
sparse_residency: false,
sparse_aliased: true,
};
unsafe {
match UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
sparse)
{
match UnsafeBuffer::new(device,
128,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
sparse) {
Err(BufferCreationError::SparseResidencyAliasedFeatureNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
};
}
@ -469,7 +518,11 @@ mod tests {
let (device, _) = gfx_dev_and_queue!();
unsafe {
let _ = UnsafeBuffer::new(device, 0, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>, SparseLevel::none());
let _ = UnsafeBuffer::new(device,
0,
BufferUsage::all(),
Sharing::Exclusive::<Empty<_>>,
SparseLevel::none());
};
}
}

View File

@ -39,7 +39,10 @@ pub unsafe trait BufferAccess: DeviceOwned {
///
/// This method can only be called for buffers whose type is known to be an array.
#[inline]
fn len(&self) -> usize where Self: TypedBufferAccess, Self::Content: Content {
fn len(&self) -> usize
where Self: TypedBufferAccess,
Self::Content: Content
{
self.size() / <Self::Content as Content>::indiv_size()
}
@ -95,10 +98,9 @@ pub unsafe trait BufferAccess: DeviceOwned {
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
fn conflicts_buffer(&self, self_offset: usize, self_size: usize,
other: &BufferAccess, other_offset: usize, other_size: usize)
-> bool
{
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
// TODO: should we really provide a default implementation?
debug_assert!(self_size <= self.size());
@ -129,9 +131,11 @@ pub unsafe trait BufferAccess: DeviceOwned {
/// of `self` at the same time as the offset/size of `other` without causing a data race.
fn conflicts_image(&self, self_offset: usize, self_size: usize, other: &ImageAccess,
other_first_layer: u32, other_num_layers: u32, other_first_mipmap: u32,
other_num_mipmaps: u32) -> bool
{
let other_key = other.conflict_key(other_first_layer, other_num_layers, other_first_mipmap,
other_num_mipmaps: u32)
-> bool {
let other_key = other.conflict_key(other_first_layer,
other_num_layers,
other_first_mipmap,
other_num_mipmaps);
self.conflict_key(self_offset, self_size) == other_key
}
@ -161,7 +165,12 @@ pub unsafe trait BufferAccess: DeviceOwned {
/// Shortcut for `conflicts_image` that compares the whole buffer to a whole image.
#[inline]
fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
self.conflicts_image(0, self.size(), other, 0, other.dimensions().array_layers(), 0,
self.conflicts_image(0,
self.size(),
other,
0,
other.dimensions().array_layers(),
0,
other.mipmap_levels())
}
@ -214,7 +223,10 @@ pub struct BufferInner<'a> {
pub offset: usize,
}
unsafe impl<T> BufferAccess for T where T: SafeDeref, T::Target: BufferAccess {
unsafe impl<T> BufferAccess for T
where T: SafeDeref,
T::Target: BufferAccess
{
#[inline]
fn inner(&self) -> BufferInner {
(**self).inner()
@ -226,9 +238,9 @@ unsafe impl<T> BufferAccess for T where T: SafeDeref, T::Target: BufferAccess {
}
#[inline]
fn conflicts_buffer(&self, self_offset: usize, self_size: usize,
other: &BufferAccess, other_offset: usize, other_size: usize) -> bool
{
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
(**self).conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
}
@ -259,6 +271,9 @@ pub unsafe trait TypedBufferAccess: BufferAccess {
type Content: ?Sized;
}
unsafe impl<T> TypedBufferAccess for T where T: SafeDeref, T::Target: TypedBufferAccess {
unsafe impl<T> TypedBufferAccess for T
where T: SafeDeref,
T::Target: TypedBufferAccess
{
type Content = <T::Target as TypedBufferAccess>::Content;
}

View File

@ -67,7 +67,7 @@ impl BufferUsage {
pub fn transfer_source() -> BufferUsage {
BufferUsage {
transfer_source: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -76,7 +76,7 @@ impl BufferUsage {
pub fn transfer_dest() -> BufferUsage {
BufferUsage {
transfer_dest: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -85,7 +85,7 @@ impl BufferUsage {
pub fn vertex_buffer() -> BufferUsage {
BufferUsage {
vertex_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -96,7 +96,7 @@ impl BufferUsage {
BufferUsage {
vertex_buffer: true,
transfer_dest: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -105,7 +105,7 @@ impl BufferUsage {
pub fn index_buffer() -> BufferUsage {
BufferUsage {
index_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -115,7 +115,7 @@ impl BufferUsage {
BufferUsage {
index_buffer: true,
transfer_dest: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -124,7 +124,7 @@ impl BufferUsage {
pub fn uniform_buffer() -> BufferUsage {
BufferUsage {
uniform_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -135,7 +135,7 @@ impl BufferUsage {
BufferUsage {
uniform_buffer: true,
transfer_dest: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -144,7 +144,7 @@ impl BufferUsage {
pub fn indirect_buffer() -> BufferUsage {
BufferUsage {
indirect_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
@ -155,7 +155,7 @@ impl BufferUsage {
BufferUsage {
indirect_buffer: true,
transfer_dest: true,
.. BufferUsage::none()
..BufferUsage::none()
}
}
}
@ -183,14 +183,32 @@ impl BitOr for BufferUsage {
#[inline]
pub fn usage_to_bits(usage: BufferUsage) -> vk::BufferUsageFlagBits {
let mut result = 0;
if usage.transfer_source { result |= vk::BUFFER_USAGE_TRANSFER_SRC_BIT; }
if usage.transfer_dest { result |= vk::BUFFER_USAGE_TRANSFER_DST_BIT; }
if usage.uniform_texel_buffer { result |= vk::BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; }
if usage.storage_texel_buffer { result |= vk::BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; }
if usage.uniform_buffer { result |= vk::BUFFER_USAGE_UNIFORM_BUFFER_BIT; }
if usage.storage_buffer { result |= vk::BUFFER_USAGE_STORAGE_BUFFER_BIT; }
if usage.index_buffer { result |= vk::BUFFER_USAGE_INDEX_BUFFER_BIT; }
if usage.vertex_buffer { result |= vk::BUFFER_USAGE_VERTEX_BUFFER_BIT; }
if usage.indirect_buffer { result |= vk::BUFFER_USAGE_INDIRECT_BUFFER_BIT; }
if usage.transfer_source {
result |= vk::BUFFER_USAGE_TRANSFER_SRC_BIT;
}
if usage.transfer_dest {
result |= vk::BUFFER_USAGE_TRANSFER_DST_BIT;
}
if usage.uniform_texel_buffer {
result |= vk::BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
}
if usage.storage_texel_buffer {
result |= vk::BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
}
if usage.uniform_buffer {
result |= vk::BUFFER_USAGE_UNIFORM_BUFFER_BIT;
}
if usage.storage_buffer {
result |= vk::BUFFER_USAGE_STORAGE_BUFFER_BIT;
}
if usage.index_buffer {
result |= vk::BUFFER_USAGE_INDEX_BUFFER_BIT;
}
if usage.vertex_buffer {
result |= vk::BUFFER_USAGE_VERTEX_BUFFER_BIT;
}
if usage.indirect_buffer {
result |= vk::BUFFER_USAGE_INDIRECT_BUFFER_BIT;
}
result
}

View File

@ -8,10 +8,10 @@
// according to those terms.
//! View of a buffer, in order to use it as a uniform texel buffer or storage texel buffer.
//!
//!
//! In order to use a buffer as a uniform texel buffer or a storage texel buffer, you have to
//! create a `BufferView`, which indicates which format the data is in.
//!
//!
//! In order to create a view from a buffer, the buffer must have been created with either the
//! `uniform_texel_buffer` or the `storage_texel_buffer` usage.
//!
@ -37,9 +37,9 @@
//! let _view = BufferView::new(buffer, format::R32Uint).unwrap();
//! ```
use std::marker::PhantomData;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::sync::Arc;
@ -61,40 +61,42 @@ use vk;
/// Represents a way for the GPU to interpret buffer data. See the documentation of the
/// `view` module.
pub struct BufferView<F, B> where B: BufferAccess {
pub struct BufferView<F, B>
where B: BufferAccess
{
view: vk::BufferView,
buffer: B,
marker: PhantomData<F>,
atomic_accesses: bool,
}
impl<F, B> BufferView<F, B> where B: BufferAccess {
impl<F, B> BufferView<F, B>
where B: BufferAccess
{
/// Builds a new buffer view.
#[inline]
pub fn new(buffer: B, format: F) -> Result<BufferView<F, B>, BufferViewCreationError>
where B: TypedBufferAccess<Content = [F::Pixel]>,
F: StrongStorage + 'static
{
unsafe {
BufferView::unchecked(buffer, format)
}
unsafe { BufferView::unchecked(buffer, format) }
}
/// Builds a new buffer view from a `BufferAccess` object.
#[inline]
#[deprecated = "Use new() instead"]
pub fn from_access(buffer: B, format: F) -> Result<BufferView<F, B>, BufferViewCreationError>
where B: TypedBufferAccess<Content = [F::Pixel]>, F: StrongStorage + 'static
where B: TypedBufferAccess<Content = [F::Pixel]>,
F: StrongStorage + 'static
{
unsafe {
BufferView::unchecked(buffer, format)
}
unsafe { BufferView::unchecked(buffer, format) }
}
/// Builds a new buffer view without checking that the format is correct.
pub unsafe fn unchecked(org_buffer: B, format: F)
-> Result<BufferView<F, B>, BufferViewCreationError>
where B: BufferAccess, F: FormatDesc + 'static
where B: BufferAccess,
F: FormatDesc + 'static
{
let (view, format_props) = {
let size = org_buffer.size();
@ -110,8 +112,14 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
}
{
let nb = size / format.size().expect("Can't use a compressed format for buffer views");
let l = device.physical_device().limits().max_texel_buffer_elements();
let nb = size /
format
.size()
.expect("Can't use a compressed format for buffer views");
let l = device
.physical_device()
.limits()
.max_texel_buffer_elements();
if nb > l as usize {
return Err(BufferViewCreationError::MaxTexelBufferElementsExceeded);
}
@ -121,7 +129,8 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
let vk_i = device.instance().pointers();
let mut output = mem::uninitialized();
vk_i.GetPhysicalDeviceFormatProperties(device.physical_device().internal_object(),
format as u32, &mut output);
format as u32,
&mut output);
output.bufferFeatures
};
@ -140,7 +149,7 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
let infos = vk::BufferViewCreateInfo {
sType: vk::STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved,
flags: 0, // reserved,
buffer: buffer.internal_object(),
format: format as u32,
offset: offset as u64,
@ -149,18 +158,21 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
let vk = device.pointers();
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateBufferView(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateBufferView(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
(output, format_props)
};
Ok(BufferView {
view: view,
buffer: org_buffer,
marker: PhantomData,
atomic_accesses: (format_props &
vk::FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) != 0,
})
view: view,
buffer: org_buffer,
marker: PhantomData,
atomic_accesses: (format_props &
vk::FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) !=
0,
})
}
/// Returns the buffer associated to this view.
@ -188,7 +200,9 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
}
}
unsafe impl<F, B> VulkanObject for BufferView<F, B> where B: BufferAccess {
unsafe impl<F, B> VulkanObject for BufferView<F, B>
where B: BufferAccess
{
type Object = vk::BufferView;
#[inline]
@ -206,7 +220,9 @@ unsafe impl<F, B> DeviceOwned for BufferView<F, B>
}
}
impl<F, B> fmt::Debug for BufferView<F, B> where B: BufferAccess + fmt::Debug {
impl<F, B> fmt::Debug for BufferView<F, B>
where B: BufferAccess + fmt::Debug
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.debug_struct("BufferView")
.field("raw", &self.view)
@ -215,12 +231,15 @@ impl<F, B> fmt::Debug for BufferView<F, B> where B: BufferAccess + fmt::Debug {
}
}
impl<F, B> Drop for BufferView<F, B> where B: BufferAccess {
impl<F, B> Drop for BufferView<F, B>
where B: BufferAccess
{
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.buffer.inner().buffer.device().pointers();
vk.DestroyBufferView(self.buffer.inner().buffer.device().internal_object(), self.view,
vk.DestroyBufferView(self.buffer.inner().buffer.device().internal_object(),
self.view,
ptr::null());
}
}
@ -233,7 +252,9 @@ pub unsafe trait BufferViewRef {
fn view(&self) -> &BufferView<Self::Format, Self::BufferAccess>;
}
unsafe impl<F, B> BufferViewRef for BufferView<F, B> where B: BufferAccess {
unsafe impl<F, B> BufferViewRef for BufferView<F, B>
where B: BufferAccess
{
type BufferAccess = B;
type Format = F;
@ -243,7 +264,10 @@ unsafe impl<F, B> BufferViewRef for BufferView<F, B> where B: BufferAccess {
}
}
unsafe impl<T, F, B> BufferViewRef for T where T: SafeDeref<Target = BufferView<F, B>>, B: BufferAccess {
unsafe impl<T, F, B> BufferViewRef for T
where T: SafeDeref<Target = BufferView<F, B>>,
B: BufferAccess
{
type BufferAccess = B;
type Format = F;
@ -275,10 +299,10 @@ impl error::Error for BufferViewCreationError {
fn description(&self) -> &str {
match *self {
BufferViewCreationError::OomError(_) => "out of memory when creating buffer view",
BufferViewCreationError::WrongBufferUsage => "the buffer is missing correct usage \
flags",
BufferViewCreationError::UnsupportedFormat => "the requested format is not supported \
for this usage",
BufferViewCreationError::WrongBufferUsage =>
"the buffer is missing correct usage flags",
BufferViewCreationError::UnsupportedFormat =>
"the requested format is not supported for this usage",
BufferViewCreationError::MaxTexelBufferElementsExceeded => {
"the maximum number of texel elements is exceeded"
},
@ -317,10 +341,10 @@ impl From<Error> for BufferViewCreationError {
#[cfg(test)]
mod tests {
use buffer::BufferView;
use buffer::BufferUsage;
use buffer::view::BufferViewCreationError;
use buffer::BufferView;
use buffer::immutable::ImmutableBuffer;
use buffer::view::BufferViewCreationError;
use format;
#[test]
@ -330,11 +354,14 @@ mod tests {
let usage = BufferUsage {
uniform_texel_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
};
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), usage,
Some(queue.family()), queue.clone()).unwrap();
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0 .. 128).map(|_| [0; 4]),
usage,
Some(queue.family()),
queue.clone())
.unwrap();
let view = BufferView::new(buffer, format::R8G8B8A8Unorm).unwrap();
assert!(view.uniform_texel_buffer());
@ -347,12 +374,14 @@ mod tests {
let usage = BufferUsage {
storage_texel_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
};
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), usage,
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0 .. 128).map(|_| [0; 4]),
usage,
Some(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();
let view = BufferView::new(buffer, format::R8G8B8A8Unorm).unwrap();
assert!(view.storage_texel_buffer());
@ -365,12 +394,14 @@ mod tests {
let usage = BufferUsage {
storage_texel_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
};
let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0..128).map(|_| 0), usage,
let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0 .. 128).map(|_| 0),
usage,
Some(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();
let view = BufferView::new(buffer, format::R32Uint).unwrap();
assert!(view.storage_texel_buffer());
@ -382,14 +413,15 @@ mod tests {
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]),
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0 .. 128).map(|_| [0; 4]),
BufferUsage::none(),
Some(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();
match BufferView::new(buffer, format::R8G8B8A8Unorm) {
Err(BufferViewCreationError::WrongBufferUsage) => (),
_ => panic!()
_ => panic!(),
}
}
@ -400,17 +432,19 @@ mod tests {
let usage = BufferUsage {
uniform_texel_buffer: true,
storage_texel_buffer: true,
.. BufferUsage::none()
..BufferUsage::none()
};
let (buffer, _) = ImmutableBuffer::<[[f64; 4]]>::from_iter((0..128).map(|_| [0.0; 4]),
usage, Some(queue.family()),
queue.clone()).unwrap();
let (buffer, _) = ImmutableBuffer::<[[f64; 4]]>::from_iter((0 .. 128).map(|_| [0.0; 4]),
usage,
Some(queue.family()),
queue.clone())
.unwrap();
// TODO: what if R64G64B64A64Sfloat is supported?
match BufferView::new(buffer, format::R64G64B64A64Sfloat) {
Err(BufferViewCreationError::UnsupportedFormat) => (),
_ => panic!()
_ => panic!(),
}
}
}

View File

@ -14,6 +14,7 @@ use std::mem;
use std::slice;
use std::sync::Arc;
use OomError;
use buffer::BufferAccess;
use buffer::TypedBufferAccess;
use command_buffer::CommandBuffer;
@ -29,8 +30,8 @@ use command_buffer::pool::standard::StandardCommandPoolAlloc;
use command_buffer::pool::standard::StandardCommandPoolBuilder;
use command_buffer::synced::SyncCommandBuffer;
use command_buffer::synced::SyncCommandBufferBuilder;
use command_buffer::synced::SyncCommandBufferBuilderError;
use command_buffer::synced::SyncCommandBufferBuilderBindVertexBuffer;
use command_buffer::synced::SyncCommandBufferBuilderError;
use command_buffer::sys::Flags;
use command_buffer::sys::Kind;
use command_buffer::sys::UnsafeCommandBuffer;
@ -43,10 +44,10 @@ use device::Device;
use device::DeviceOwned;
use device::Queue;
use framebuffer::FramebufferAbstract;
use framebuffer::RenderPassDescClearValues;
use framebuffer::RenderPassAbstract;
use image::ImageLayout;
use framebuffer::RenderPassDescClearValues;
use image::ImageAccess;
use image::ImageLayout;
use instance::QueueFamily;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
@ -54,9 +55,8 @@ use pipeline::input_assembly::Index;
use pipeline::vertex::VertexSource;
use sync::AccessCheckError;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::GpuFuture;
use OomError;
use sync::PipelineStages;
///
///
@ -72,17 +72,16 @@ pub struct AutoCommandBufferBuilder<P = StandardCommandPoolBuilder> {
impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
pub fn new(device: Arc<Device>, queue_family: QueueFamily)
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
{
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
unsafe {
let pool = Device::standard_command_pool(&device, queue_family);
let inner = SyncCommandBufferBuilder::new(&pool, Kind::primary(), Flags::None);
let state_cacher = StateCacher::new();
Ok(AutoCommandBufferBuilder {
inner: inner?,
state_cacher: state_cacher,
})
inner: inner?,
state_cacher: state_cacher,
})
}
}
}
@ -94,9 +93,7 @@ impl<P> AutoCommandBufferBuilder<P> {
where P: CommandPoolBuilderAlloc
{
// TODO: error if we're inside a render pass
Ok(AutoCommandBuffer {
inner: self.inner.build()?
})
Ok(AutoCommandBuffer { inner: self.inner.build()? })
}
/// Adds a command that enters a render pass.
@ -107,15 +104,15 @@ impl<P> AutoCommandBufferBuilder<P> {
///
/// You must call this before you can add draw commands.
#[inline]
pub fn begin_render_pass<F, C>(mut self, framebuffer: F, secondary: bool,
clear_values: C)
pub fn begin_render_pass<F, C>(mut self, framebuffer: F, secondary: bool, clear_values: C)
-> Result<Self, AutoCommandBufferBuilderContextError>
where F: FramebufferAbstract + RenderPassDescClearValues<C> + Send + Sync + 'static
{
unsafe {
let clear_values = framebuffer.convert_clear_values(clear_values);
let clear_values = clear_values.collect::<Vec<_>>().into_iter(); // TODO: necessary for Send + Sync ; needs an API rework of convert_clear_values
self.inner.begin_render_pass(framebuffer, secondary, clear_values);
let clear_values = clear_values.collect::<Vec<_>>().into_iter(); // TODO: necessary for Send + Sync ; needs an API rework of convert_clear_values
self.inner
.begin_render_pass(framebuffer, secondary, clear_values);
Ok(self)
}
}
@ -125,9 +122,10 @@ impl<P> AutoCommandBufferBuilder<P> {
/// This command will copy from the source to the destination. If their size is not equal, then
/// the amount of data copied is equal to the smallest of the two.
#[inline]
pub fn copy_buffer<S, D>(mut self, src: S, dest: D) -> Result<Self, validity::CheckCopyBufferError>
pub fn copy_buffer<S, D>(mut self, src: S, dest: D)
-> Result<Self, validity::CheckCopyBufferError>
where S: BufferAccess + Send + Sync + 'static,
D: BufferAccess + Send + Sync + 'static,
D: BufferAccess + Send + Sync + 'static
{
unsafe {
// TODO: check that we're not in a render pass
@ -143,19 +141,19 @@ impl<P> AutoCommandBufferBuilder<P> {
pub fn copy_buffer_to_image<S, D>(mut self, src: S, dest: D)
-> Result<Self, AutoCommandBufferBuilderContextError>
where S: BufferAccess + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static
{
let dims = dest.dimensions().width_height_depth();
self.copy_buffer_to_image_dimensions(src, dest, [0, 0, 0], dims, 0, 1, 0)
}
/// Adds a command that copies from a buffer to an image.
pub fn copy_buffer_to_image_dimensions<S, D>(mut self, src: S, dest: D, offset: [u32; 3],
size: [u32; 3], first_layer: u32, num_layers: u32,
mipmap: u32)
-> Result<Self, AutoCommandBufferBuilderContextError>
pub fn copy_buffer_to_image_dimensions<S, D>(
mut self, src: S, dest: D, offset: [u32; 3], size: [u32; 3], first_layer: u32,
num_layers: u32, mipmap: u32)
-> Result<Self, AutoCommandBufferBuilderContextError>
where S: BufferAccess + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static,
D: ImageAccess + Send + Sync + 'static
{
unsafe {
// TODO: check that we're not in a render pass
@ -167,7 +165,11 @@ impl<P> AutoCommandBufferBuilder<P> {
buffer_row_length: 0,
buffer_image_height: 0,
image_aspect: if dest.has_color() {
UnsafeCommandBufferBuilderImageAspect { color: true, depth: false, stencil: false }
UnsafeCommandBufferBuilderImageAspect {
color: true,
depth: false,
stencil: false,
}
} else {
unimplemented!()
},
@ -188,13 +190,15 @@ impl<P> AutoCommandBufferBuilder<P> {
#[inline]
pub fn dispatch<Cp, S, Pc>(mut self, dimensions: [u32; 3], pipeline: Cp, sets: S, constants: Pc)
-> Result<Self, AutoCommandBufferBuilderContextError>
where Cp: ComputePipelineAbstract + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
where Cp: ComputePipelineAbstract + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection
{
unsafe {
// TODO: missing checks
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_compute_pipeline(&pipeline) {
if let StateCacherOutcome::NeedChange =
self.state_cacher.bind_compute_pipeline(&pipeline)
{
self.inner.bind_pipeline_compute(pipeline.clone());
}
@ -208,43 +212,50 @@ impl<P> AutoCommandBufferBuilder<P> {
#[inline]
pub fn draw<V, Gp, S, Pc>(mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, sets: S,
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
constants: Pc)
-> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection
{
unsafe {
// TODO: missing checks
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
if let StateCacherOutcome::NeedChange =
self.state_cacher.bind_graphics_pipeline(&pipeline)
{
self.inner.bind_pipeline_graphics(pipeline.clone());
}
push_constants(&mut self.inner, pipeline.clone(), constants);
set_state(&mut self.inner, dynamic);
descriptor_sets(&mut self.inner, true, pipeline.clone(), sets);
let (vertex_count, instance_count) = vertex_buffers(&mut self.inner, &pipeline,
vertices);
let (vertex_count, instance_count) =
vertex_buffers(&mut self.inner, &pipeline, vertices);
self.inner.draw(vertex_count as u32, instance_count as u32, 0, 0);
self.inner
.draw(vertex_count as u32, instance_count as u32, 0, 0);
Ok(self)
}
}
#[inline]
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(mut self, pipeline: Gp, dynamic: DynamicState,
vertices: V, index_buffer: Ib, sets: S,
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(
mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, index_buffer: Ib, sets: S,
constants: Pc)
-> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
Ib: BufferAccess + TypedBufferAccess<Content = [I]> + Send + Sync + 'static,
I: Index + 'static,
I: Index + 'static
{
unsafe {
// TODO: missing checks
let index_count = index_buffer.len();
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
if let StateCacherOutcome::NeedChange =
self.state_cacher.bind_graphics_pipeline(&pipeline)
{
self.inner.bind_pipeline_graphics(pipeline.clone());
}
@ -262,18 +273,24 @@ impl<P> AutoCommandBufferBuilder<P> {
#[inline]
pub fn draw_indirect<V, Gp, S, Pc, Ib>(mut self, pipeline: Gp, dynamic: DynamicState,
vertices: V, indirect_buffer: Ib, sets: S,
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
vertices: V, indirect_buffer: Ib, sets: S, constants: Pc)
-> Result<Self, AutoCommandBufferBuilderContextError>
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
S: DescriptorSetsCollection,
Ib: BufferAccess + TypedBufferAccess<Content = [DrawIndirectCommand]> + Send + Sync + 'static,
Ib: BufferAccess
+ TypedBufferAccess<Content = [DrawIndirectCommand]>
+ Send
+ Sync
+ 'static
{
unsafe {
// TODO: missing checks
let draw_count = indirect_buffer.len() as u32;
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
if let StateCacherOutcome::NeedChange =
self.state_cacher.bind_graphics_pipeline(&pipeline)
{
self.inner.bind_pipeline_graphics(pipeline.clone());
}
@ -282,7 +299,8 @@ impl<P> AutoCommandBufferBuilder<P> {
descriptor_sets(&mut self.inner, true, pipeline.clone(), sets);
vertex_buffers(&mut self.inner, &pipeline, vertices);
self.inner.draw_indirect(indirect_buffer, draw_count,
self.inner.draw_indirect(indirect_buffer,
draw_count,
mem::size_of::<DrawIndirectCommand>() as u32);
Ok(self)
}
@ -312,8 +330,9 @@ impl<P> AutoCommandBufferBuilder<P> {
/// > this function only for zeroing the content of a buffer by passing `0` for the data.
// TODO: not safe because of signalling NaNs
#[inline]
pub fn fill_buffer<B>(mut self, buffer: B, data: u32) -> Result<Self, validity::CheckFillBufferError>
where B: BufferAccess + Send + Sync + 'static,
pub fn fill_buffer<B>(mut self, buffer: B, data: u32)
-> Result<Self, validity::CheckFillBufferError>
where B: BufferAccess + Send + Sync + 'static
{
unsafe {
// TODO: check that we're not in a render pass
@ -326,8 +345,7 @@ impl<P> AutoCommandBufferBuilder<P> {
/// Adds a command that jumps to the next subpass of the current render pass.
#[inline]
pub fn next_subpass(mut self, secondary: bool)
-> Result<Self, AutoCommandBufferBuilderContextError>
{
-> Result<Self, AutoCommandBufferBuilderContextError> {
unsafe {
// TODO: check
self.inner.next_subpass(secondary);
@ -353,7 +371,7 @@ impl<P> AutoCommandBufferBuilder<P> {
if buffer.size() > size_of_data {
self.inner.update_buffer(buffer, data);
} else {
unimplemented!() // TODO:
unimplemented!() // TODO:
//self.inner.update_buffer(buffer.slice(0 .. size_of_data), data);
}
@ -377,18 +395,20 @@ unsafe fn push_constants<P, Pl, Pc>(dest: &mut SyncCommandBufferBuilder<P>, pipe
for num_range in 0 .. pipeline.num_push_constants_ranges() {
let range = match pipeline.push_constants_range(num_range) {
Some(r) => r,
None => continue
None => continue,
};
debug_assert_eq!(range.offset % 4, 0);
debug_assert_eq!(range.size % 4, 0);
let data = slice::from_raw_parts((&push_constants as *const Pc as *const u8)
.offset(range.offset as isize),
.offset(range.offset as isize),
range.size as usize);
dest.push_constants::<_, [u8]>(pipeline.clone(), range.stages,
range.offset as u32, range.size as u32,
dest.push_constants::<_, [u8]>(pipeline.clone(),
range.stages,
range.offset as u32,
range.size as u32,
data);
}
}
@ -400,18 +420,19 @@ unsafe fn set_state<P>(dest: &mut SyncCommandBufferBuilder<P>, dynamic: DynamicS
}
if let Some(ref viewports) = dynamic.viewports {
dest.set_viewport(0, viewports.iter().cloned().collect::<Vec<_>>().into_iter()); // TODO: don't collect
dest.set_viewport(0, viewports.iter().cloned().collect::<Vec<_>>().into_iter()); // TODO: don't collect
}
if let Some(ref scissors) = dynamic.scissors {
dest.set_scissor(0, scissors.iter().cloned().collect::<Vec<_>>().into_iter()); // TODO: don't collect
dest.set_scissor(0, scissors.iter().cloned().collect::<Vec<_>>().into_iter()); // TODO: don't collect
}
}
// Shortcut function to bind vertex buffers.
unsafe fn vertex_buffers<P, Gp, V>(dest: &mut SyncCommandBufferBuilder<P>, pipeline: &Gp,
vertices: V) -> (u32, u32)
where Gp: VertexSource<V>,
vertices: V)
-> (u32, u32)
where Gp: VertexSource<V>
{
let (vertex_buffers, vertex_count, instance_count) = pipeline.decode(vertices);
@ -451,22 +472,24 @@ unsafe impl<P> CommandBuffer for AutoCommandBuffer<P> {
}
#[inline]
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError> {
self.inner.prepare_submit(future, queue)
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
fn check_buffer_access(
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
self.inner.check_buffer_access(buffer, exclusive, queue)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
self.inner.check_image_access(image, layout, exclusive, queue)
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
self.inner
.check_image_access(image, layout, exclusive, queue)
}
}

View File

@ -28,10 +28,10 @@
//! Using secondary command buffers leads to slightly lower performances on the GPU, but they have
//! two advantages on the CPU side:
//!
//! - Building a command buffer is a single-threaded operation, but by using secondary command
//! - Building a command buffer is a single-threaded operation, but by using secondary command
//! buffers you can build multiple secondary command buffers in multiple threads simultaneously.
//! - Secondary command buffers can be kept alive between frames. When you always repeat the same
//! operations, it might be a good idea to build a secondary command buffer once at
//! - Secondary command buffers can be kept alive between frames. When you always repeat the same
//! operations, it might be a good idea to build a secondary command buffer once at
//! initialization and then reuse it afterwards.
//!
//! # The `AutoCommandBufferBuilder`
@ -73,8 +73,8 @@
//! alternative command pool implementations and use them. See the `pool` module for more
//! information.
pub use self::auto::AutoCommandBufferBuilder;
pub use self::auto::AutoCommandBuffer;
pub use self::auto::AutoCommandBufferBuilder;
pub use self::state_cacher::StateCacher;
pub use self::state_cacher::StateCacherOutcome;
pub use self::traits::CommandBuffer;
@ -82,8 +82,8 @@ pub use self::traits::CommandBufferBuild;
pub use self::traits::CommandBufferExecError;
pub use self::traits::CommandBufferExecFuture;
use pipeline::viewport::Viewport;
use pipeline::viewport::Scissor;
use pipeline::viewport::Viewport;
pub mod pool;
pub mod submit;

View File

@ -8,24 +8,24 @@
// according to those terms.
//! In the Vulkan API, command buffers must be allocated from *command pools*.
//!
//!
//! A command pool holds and manages the memory of one or more command buffers. If you destroy a
//! command pool, all of its command buffers are automatically destroyed.
//!
//!
//! In vulkano, creating a command buffer requires passing an implementation of the `CommandPool`
//! trait. By default vulkano will use the `StandardCommandPool` struct, but you can implement
//! this trait yourself by wrapping around the `UnsafeCommandPool` type.
use instance::QueueFamily;
use device::DeviceOwned;
use OomError;
use device::DeviceOwned;
pub use self::standard::StandardCommandPool;
pub use self::sys::CommandPoolTrimError;
pub use self::sys::UnsafeCommandPool;
pub use self::sys::UnsafeCommandPoolAlloc;
pub use self::sys::UnsafeCommandPoolAllocIter;
pub use self::sys::CommandPoolTrimError;
pub mod standard;
mod sys;

View File

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use fnv::FnvHasher;
use std::cmp;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
@ -14,7 +15,6 @@ use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::Weak;
use fnv::FnvHasher;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolAlloc;
@ -23,16 +23,18 @@ use command_buffer::pool::UnsafeCommandPool;
use command_buffer::pool::UnsafeCommandPoolAlloc;
use instance::QueueFamily;
use device::Device;
use device::DeviceOwned;
use OomError;
use VulkanObject;
use device::Device;
use device::DeviceOwned;
// Since the stdlib doesn't have a "thread ID" yet, we store a `Box<u8>` for each thread and the
// value of the pointer will be used as a thread id.
thread_local!(static THREAD_ID: Box<u8> = Box::new(0));
#[inline]
fn curr_thread_id() -> usize { THREAD_ID.with(|data| &**data as *const u8 as usize) }
fn curr_thread_id() -> usize {
THREAD_ID.with(|data| &**data as *const u8 as usize)
}
/// Standard implementation of a command pool.
///
@ -47,12 +49,15 @@ pub struct StandardCommandPool {
queue_family: u32,
// For each "thread id" (see `THREAD_ID` above), we store thread-specific info.
per_thread: Mutex<HashMap<usize, Weak<Mutex<StandardCommandPoolPerThread>>,
per_thread: Mutex<HashMap<usize,
Weak<Mutex<StandardCommandPoolPerThread>>,
BuildHasherDefault<FnvHasher>>>,
}
unsafe impl Send for StandardCommandPool {}
unsafe impl Sync for StandardCommandPool {}
unsafe impl Send for StandardCommandPool {
}
unsafe impl Sync for StandardCommandPool {
}
struct StandardCommandPoolPerThread {
// The Vulkan pool of this thread.
@ -83,7 +88,7 @@ impl StandardCommandPool {
}
unsafe impl CommandPool for Arc<StandardCommandPool> {
type Iter = Box<Iterator<Item = StandardCommandPoolBuilder>>; // TODO: meh for Box
type Iter = Box<Iterator<Item = StandardCommandPoolBuilder>>; // TODO: meh for Box
type Builder = StandardCommandPoolBuilder;
type Alloc = StandardCommandPoolAlloc;
@ -98,13 +103,13 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
let per_thread = match per_thread {
Some(pt) => pt,
None => {
let new_pool = try!(UnsafeCommandPool::new(self.device.clone(), self.queue_family(),
false, true));
let new_pool =
UnsafeCommandPool::new(self.device.clone(), self.queue_family(), false, true)?;
let pt = Arc::new(Mutex::new(StandardCommandPoolPerThread {
pool: new_pool,
available_primary_command_buffers: Vec::new(),
available_secondary_command_buffers: Vec::new(),
}));
pool: new_pool,
available_primary_command_buffers: Vec::new(),
available_secondary_command_buffers: Vec::new(),
}));
hashmap.insert(curr_thread_id, Arc::downgrade(&pt));
pt
@ -116,39 +121,51 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
// Build an iterator to pick from already-existing command buffers.
let (num_from_existing, from_existing) = {
// Which list of already-existing command buffers we are going to pick CBs from.
let mut existing = if secondary { &mut pt_lock.available_secondary_command_buffers }
else { &mut pt_lock.available_primary_command_buffers };
let mut existing = if secondary {
&mut pt_lock.available_secondary_command_buffers
} else {
&mut pt_lock.available_primary_command_buffers
};
let num_from_existing = cmp::min(count as usize, existing.len());
let from_existing = existing.drain(0 .. num_from_existing).collect::<Vec<_>>().into_iter();
let from_existing = existing
.drain(0 .. num_from_existing)
.collect::<Vec<_>>()
.into_iter();
(num_from_existing, from_existing)
};
// Build an iterator to construct the missing command buffers from the Vulkan pool.
let num_new = count as usize - num_from_existing;
debug_assert!(num_new <= count as usize); // Check overflows.
let newly_allocated = try!(pt_lock.pool.alloc_command_buffers(secondary, num_new));
debug_assert!(num_new <= count as usize); // Check overflows.
let newly_allocated = pt_lock.pool.alloc_command_buffers(secondary, num_new)?;
// Returning them as a chain.
let device = self.device.clone();
let queue_family_id = self.queue_family;
let per_thread = per_thread.clone();
let final_iter = from_existing.chain(newly_allocated).map(move |cmd| {
StandardCommandPoolBuilder {
cmd: Some(cmd),
pool: per_thread.clone(),
secondary: secondary,
device: device.clone(),
queue_family_id: queue_family_id,
dummy_avoid_send_sync: PhantomData,
}
}).collect::<Vec<_>>();
let final_iter = from_existing
.chain(newly_allocated)
.map(move |cmd| {
StandardCommandPoolBuilder {
cmd: Some(cmd),
pool: per_thread.clone(),
secondary: secondary,
device: device.clone(),
queue_family_id: queue_family_id,
dummy_avoid_send_sync: PhantomData,
}
})
.collect::<Vec<_>>();
Ok(Box::new(final_iter.into_iter()))
}
#[inline]
fn queue_family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.queue_family).unwrap()
self.device
.physical_device()
.queue_family_by_id(self.queue_family)
.unwrap()
}
}
@ -189,7 +206,10 @@ unsafe impl CommandPoolBuilderAlloc for StandardCommandPoolBuilder {
#[inline]
fn queue_family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.queue_family_id).unwrap()
self.device
.physical_device()
.queue_family_by_id(self.queue_family_id)
.unwrap()
}
}
@ -222,8 +242,10 @@ pub struct StandardCommandPoolAlloc {
queue_family_id: u32,
}
unsafe impl Send for StandardCommandPoolAlloc {}
unsafe impl Sync for StandardCommandPoolAlloc {}
unsafe impl Send for StandardCommandPoolAlloc {
}
unsafe impl Sync for StandardCommandPoolAlloc {
}
unsafe impl CommandPoolAlloc for StandardCommandPoolAlloc {
#[inline]
@ -233,7 +255,10 @@ unsafe impl CommandPoolAlloc for StandardCommandPoolAlloc {
#[inline]
fn queue_family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.queue_family_id).unwrap()
self.device
.physical_device()
.queue_family_by_id(self.queue_family_id)
.unwrap()
}
}
@ -249,9 +274,11 @@ impl Drop for StandardCommandPoolAlloc {
let mut pool = self.pool.lock().unwrap();
if self.secondary {
pool.available_secondary_command_buffers.push(self.cmd.take().unwrap());
pool.available_secondary_command_buffers
.push(self.cmd.take().unwrap());
} else {
pool.available_primary_command_buffers.push(self.cmd.take().unwrap());
pool.available_primary_command_buffers
.push(self.cmd.take().unwrap());
}
}
}

View File

@ -7,23 +7,23 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::error;
use std::fmt;
use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use smallvec::SmallVec;
use instance::QueueFamily;
use device::Device;
use device::DeviceOwned;
use Error;
use OomError;
use VulkanObject;
use Error;
use check_errors;
use device::Device;
use device::DeviceOwned;
use vk;
/// Low-level implementation of a command pool.
@ -46,7 +46,8 @@ pub struct UnsafeCommandPool {
dummy_avoid_sync: PhantomData<*const u8>,
}
unsafe impl Send for UnsafeCommandPool {}
unsafe impl Send for UnsafeCommandPool {
}
impl UnsafeCommandPool {
/// Creates a new pool.
@ -62,9 +63,8 @@ impl UnsafeCommandPool {
///
/// - Panics if the queue family doesn't belong to the same physical device as `device`.
///
pub fn new(device: Arc<Device>, queue_family: QueueFamily, transient: bool,
reset_cb: bool) -> Result<UnsafeCommandPool, OomError>
{
pub fn new(device: Arc<Device>, queue_family: QueueFamily, transient: bool, reset_cb: bool)
-> Result<UnsafeCommandPool, OomError> {
assert_eq!(device.physical_device().internal_object(),
queue_family.physical_device().internal_object(),
"Device doesn't match physical device when creating a command pool");
@ -72,9 +72,16 @@ impl UnsafeCommandPool {
let vk = device.pointers();
let flags = {
let flag1 = if transient { vk::COMMAND_POOL_CREATE_TRANSIENT_BIT } else { 0 };
let flag2 = if reset_cb { vk::COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT }
else { 0 };
let flag1 = if transient {
vk::COMMAND_POOL_CREATE_TRANSIENT_BIT
} else {
0
};
let flag2 = if reset_cb {
vk::COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
} else {
0
};
flag1 | flag2
};
@ -87,17 +94,19 @@ impl UnsafeCommandPool {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateCommandPool(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateCommandPool(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(UnsafeCommandPool {
pool: pool,
device: device.clone(),
queue_family_index: queue_family.id(),
dummy_avoid_sync: PhantomData,
})
pool: pool,
device: device.clone(),
queue_family_index: queue_family.id(),
dummy_avoid_sync: PhantomData,
})
}
/// Resets the pool, which resets all the command buffers that were allocated from it.
@ -110,11 +119,14 @@ impl UnsafeCommandPool {
/// The command buffers allocated from this pool jump to the initial state.
///
pub unsafe fn reset(&self, release_resources: bool) -> Result<(), OomError> {
let flags = if release_resources { vk::COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT }
else { 0 };
let flags = if release_resources {
vk::COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT
} else {
0
};
let vk = self.device.pointers();
try!(check_errors(vk.ResetCommandPool(self.device.internal_object(), self.pool, flags)));
check_errors(vk.ResetCommandPool(self.device.internal_object(), self.pool, flags))?;
Ok(())
}
@ -134,7 +146,9 @@ impl UnsafeCommandPool {
}
let vk = self.device.pointers();
vk.TrimCommandPoolKHR(self.device.internal_object(), self.pool, 0 /* reserved */);
vk.TrimCommandPoolKHR(self.device.internal_object(),
self.pool,
0 /* reserved */);
Ok(())
}
}
@ -144,34 +158,33 @@ impl UnsafeCommandPool {
/// If `secondary` is true, allocates secondary command buffers. Otherwise, allocates primary
/// command buffers.
pub fn alloc_command_buffers(&self, secondary: bool, count: usize)
-> Result<UnsafeCommandPoolAllocIter, OomError>
{
-> Result<UnsafeCommandPoolAllocIter, OomError> {
if count == 0 {
return Ok(UnsafeCommandPoolAllocIter {
list: None
});
return Ok(UnsafeCommandPoolAllocIter { list: None });
}
let infos = vk::CommandBufferAllocateInfo {
sType: vk::STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
pNext: ptr::null(),
commandPool: self.pool,
level: if secondary { vk::COMMAND_BUFFER_LEVEL_SECONDARY }
else { vk::COMMAND_BUFFER_LEVEL_PRIMARY },
level: if secondary {
vk::COMMAND_BUFFER_LEVEL_SECONDARY
} else {
vk::COMMAND_BUFFER_LEVEL_PRIMARY
},
commandBufferCount: count as u32,
};
unsafe {
let vk = self.device.pointers();
let mut out = Vec::with_capacity(count);
try!(check_errors(vk.AllocateCommandBuffers(self.device.internal_object(), &infos,
out.as_mut_ptr())));
check_errors(vk.AllocateCommandBuffers(self.device.internal_object(),
&infos,
out.as_mut_ptr()))?;
out.set_len(count);
Ok(UnsafeCommandPoolAllocIter {
list: Some(out.into_iter())
})
Ok(UnsafeCommandPoolAllocIter { list: Some(out.into_iter()) })
}
}
@ -186,14 +199,19 @@ impl UnsafeCommandPool {
{
let command_buffers: SmallVec<[_; 4]> = command_buffers.map(|cb| cb.0).collect();
let vk = self.device.pointers();
vk.FreeCommandBuffers(self.device.internal_object(), self.pool,
command_buffers.len() as u32, command_buffers.as_ptr())
vk.FreeCommandBuffers(self.device.internal_object(),
self.pool,
command_buffers.len() as u32,
command_buffers.as_ptr())
}
/// Returns the queue family on which command buffers of this pool can be executed.
#[inline]
pub fn queue_family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.queue_family_index).unwrap()
self.device
.physical_device()
.queue_family_by_id(self.queue_family_index)
.unwrap()
}
}
@ -238,7 +256,7 @@ unsafe impl VulkanObject for UnsafeCommandPoolAlloc {
/// Iterator for newly-allocated command buffers.
#[derive(Debug)]
pub struct UnsafeCommandPoolAllocIter {
list: Option<VecIntoIter<vk::CommandBuffer>>
list: Option<VecIntoIter<vk::CommandBuffer>>,
}
impl Iterator for UnsafeCommandPoolAllocIter {
@ -246,16 +264,23 @@ impl Iterator for UnsafeCommandPoolAllocIter {
#[inline]
fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
self.list.as_mut().and_then(|i| i.next()).map(|cb| UnsafeCommandPoolAlloc(cb))
self.list
.as_mut()
.and_then(|i| i.next())
.map(|cb| UnsafeCommandPoolAlloc(cb))
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.list.as_ref().map(|i| i.size_hint()).unwrap_or((0, Some(0)))
self.list
.as_ref()
.map(|i| i.size_hint())
.unwrap_or((0, Some(0)))
}
}
impl ExactSizeIterator for UnsafeCommandPoolAllocIter {}
impl ExactSizeIterator for UnsafeCommandPoolAllocIter {
}
/// Error that can happen when trimming command pools.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@ -268,8 +293,8 @@ impl error::Error for CommandPoolTrimError {
#[inline]
fn description(&self) -> &str {
match *self {
CommandPoolTrimError::Maintenance1ExtensionNotEnabled => "the `KHR_maintenance1` \
extension was not enabled",
CommandPoolTrimError::Maintenance1ExtensionNotEnabled =>
"the `KHR_maintenance1` extension was not enabled",
}
}
}
@ -290,8 +315,8 @@ impl From<Error> for CommandPoolTrimError {
#[cfg(test)]
mod tests {
use command_buffer::pool::UnsafeCommandPool;
use command_buffer::pool::CommandPoolTrimError;
use command_buffer::pool::UnsafeCommandPool;
#[test]
fn basic_create() {
@ -321,7 +346,7 @@ mod tests {
match pool.trim() {
Err(CommandPoolTrimError::Maintenance1ExtensionNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
}

View File

@ -7,10 +7,10 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use VulkanObject;
use command_buffer::DynamicState;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use VulkanObject;
use vk;
/// Keep track of the state of a command buffer builder, so that you don't need to bind objects

View File

@ -7,11 +7,11 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::ptr;
use smallvec::SmallVec;
use buffer::sys::UnsafeBuffer;
use device::Queue;
@ -20,12 +20,12 @@ use memory::DeviceMemory;
use sync::Fence;
use sync::Semaphore;
use check_errors;
use vk;
use Error;
use OomError;
use VulkanObject;
use SynchronizedVulkanObject;
use VulkanObject;
use check_errors;
use vk;
// TODO: correctly implement Debug on all the structs of this module
@ -127,8 +127,7 @@ impl<'a> SubmitBindSparseBuilder<'a> {
/// error.
#[inline]
pub fn merge(&mut self, other: SubmitBindSparseBuilder<'a>)
-> Result<(), SubmitBindSparseBuilder<'a>>
{
-> Result<(), SubmitBindSparseBuilder<'a>> {
if self.fence != 0 && other.fence != 0 {
return Err(other);
}
@ -147,39 +146,42 @@ impl<'a> SubmitBindSparseBuilder<'a> {
// We start by storing all the `VkSparseBufferMemoryBindInfo`s of the whole command
// in the same collection.
let buffer_binds_storage: SmallVec<[_; 4]> = self.infos.iter()
let buffer_binds_storage: SmallVec<[_; 4]> = self.infos
.iter()
.flat_map(|infos| infos.buffer_binds.iter())
.map(|buf_bind| {
vk::SparseBufferMemoryBindInfo {
buffer: buf_bind.buffer,
bindCount: buf_bind.binds.len() as u32,
pBinds: buf_bind.binds.as_ptr(),
}
})
vk::SparseBufferMemoryBindInfo {
buffer: buf_bind.buffer,
bindCount: buf_bind.binds.len() as u32,
pBinds: buf_bind.binds.as_ptr(),
}
})
.collect();
// Same for all the `VkSparseImageOpaqueMemoryBindInfo`s.
let image_opaque_binds_storage: SmallVec<[_; 4]> = self.infos.iter()
let image_opaque_binds_storage: SmallVec<[_; 4]> = self.infos
.iter()
.flat_map(|infos| infos.image_opaque_binds.iter())
.map(|img_bind| {
vk::SparseImageOpaqueMemoryBindInfo {
image: img_bind.image,
bindCount: img_bind.binds.len() as u32,
pBinds: img_bind.binds.as_ptr(),
}
})
vk::SparseImageOpaqueMemoryBindInfo {
image: img_bind.image,
bindCount: img_bind.binds.len() as u32,
pBinds: img_bind.binds.as_ptr(),
}
})
.collect();
// And finally the `VkSparseImageMemoryBindInfo`s.
let image_binds_storage: SmallVec<[_; 4]> = self.infos.iter()
let image_binds_storage: SmallVec<[_; 4]> = self.infos
.iter()
.flat_map(|infos| infos.image_binds.iter())
.map(|img_bind| {
vk::SparseImageMemoryBindInfo {
image: img_bind.image,
bindCount: img_bind.binds.len() as u32,
pBinds: img_bind.binds.as_ptr(),
}
})
vk::SparseImageMemoryBindInfo {
image: img_bind.image,
bindCount: img_bind.binds.len() as u32,
pBinds: img_bind.binds.as_ptr(),
}
})
.collect();
// Now building the collection of `VkBindSparseInfo`s.
@ -231,14 +233,17 @@ impl<'a> SubmitBindSparseBuilder<'a> {
// If these assertions fail, then there's something wrong in the code above.
debug_assert_eq!(next_buffer_bind as usize, buffer_binds_storage.len());
debug_assert_eq!(next_image_opaque_bind as usize, image_opaque_binds_storage.len());
debug_assert_eq!(next_image_opaque_bind as usize,
image_opaque_binds_storage.len());
debug_assert_eq!(next_image_bind as usize, image_binds_storage.len());
bs_infos
};
// Finally executing the command.
check_errors(vk.QueueBindSparse(*queue, bs_infos.len() as u32, bs_infos.as_ptr(),
check_errors(vk.QueueBindSparse(*queue,
bs_infos.len() as u32,
bs_infos.as_ptr(),
self.fence))?;
Ok(())
}
@ -359,25 +364,24 @@ impl<'a> SubmitBindSparseBufferBindBuilder<'a> {
}
pub unsafe fn add_bind(&mut self, offset: usize, size: usize, memory: &DeviceMemory,
memory_offset: usize)
{
memory_offset: usize) {
self.binds.push(vk::SparseMemoryBind {
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: memory.internal_object(),
memoryOffset: memory_offset as vk::DeviceSize,
flags: 0, // Flags are only relevant for images.
});
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: memory.internal_object(),
memoryOffset: memory_offset as vk::DeviceSize,
flags: 0, // Flags are only relevant for images.
});
}
pub unsafe fn add_unbind(&mut self, offset: usize, size: usize) {
self.binds.push(vk::SparseMemoryBind {
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: 0,
memoryOffset: 0,
flags: 0,
});
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: 0,
memoryOffset: 0,
flags: 0,
});
}
}
@ -401,29 +405,28 @@ impl<'a> SubmitBindSparseImageOpaqueBindBuilder<'a> {
}
pub unsafe fn add_bind(&mut self, offset: usize, size: usize, memory: &DeviceMemory,
memory_offset: usize, bind_metadata: bool)
{
memory_offset: usize, bind_metadata: bool) {
self.binds.push(vk::SparseMemoryBind {
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: memory.internal_object(),
memoryOffset: memory_offset as vk::DeviceSize,
flags: if bind_metadata {
vk::SPARSE_MEMORY_BIND_METADATA_BIT
} else {
0
},
});
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: memory.internal_object(),
memoryOffset: memory_offset as vk::DeviceSize,
flags: if bind_metadata {
vk::SPARSE_MEMORY_BIND_METADATA_BIT
} else {
0
},
});
}
pub unsafe fn add_unbind(&mut self, offset: usize, size: usize) {
self.binds.push(vk::SparseMemoryBind {
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: 0,
memoryOffset: 0,
flags: 0, // TODO: is that relevant?
});
resourceOffset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
memory: 0,
memoryOffset: 0,
flags: 0, // TODO: is that relevant?
});
}
}
@ -473,7 +476,7 @@ impl error::Error for SubmitBindSparseError {
fn cause(&self) -> Option<&error::Error> {
match *self {
SubmitBindSparseError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -492,7 +495,7 @@ impl From<Error> for SubmitBindSparseError {
err @ Error::OutOfHostMemory => SubmitBindSparseError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => SubmitBindSparseError::OomError(OomError::from(err)),
Error::DeviceLost => SubmitBindSparseError::DeviceLost,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

View File

@ -13,12 +13,12 @@
//! module. These structs are low-level and unsafe, and are mostly used to implement other parts
//! of vulkano, so you are encouraged to not use them directly.
pub use self::bind_sparse::SubmitBindSparseBuilder;
pub use self::bind_sparse::SubmitBindSparseBatchBuilder;
pub use self::bind_sparse::SubmitBindSparseBufferBindBuilder;
pub use self::bind_sparse::SubmitBindSparseImageOpaqueBindBuilder;
pub use self::bind_sparse::SubmitBindSparseImageBindBuilder;
pub use self::bind_sparse::SubmitBindSparseBuilder;
pub use self::bind_sparse::SubmitBindSparseError;
pub use self::bind_sparse::SubmitBindSparseImageBindBuilder;
pub use self::bind_sparse::SubmitBindSparseImageOpaqueBindBuilder;
pub use self::queue_present::SubmitPresentBuilder;
pub use self::queue_present::SubmitPresentError;
pub use self::queue_submit::SubmitCommandBufferBuilder;

View File

@ -7,23 +7,23 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ptr;
use smallvec::SmallVec;
use device::Queue;
use swapchain::Swapchain;
use sync::Semaphore;
use check_errors;
use vk;
use Error;
use OomError;
use VulkanObject;
use SynchronizedVulkanObject;
use VulkanObject;
use check_errors;
use vk;
/// Prototype for a submission that presents a swapchain on the screen.
// TODO: example here
@ -100,7 +100,7 @@ impl<'a> SubmitPresentBuilder<'a> {
let vk = queue.device().pointers();
let queue = queue.internal_object_guard();
let mut results = vec![mem::uninitialized(); self.swapchains.len()]; // TODO: alloca
let mut results = vec![mem::uninitialized(); self.swapchains.len()]; // TODO: alloca
let infos = vk::PresentInfoKHR {
sType: vk::STRUCTURE_TYPE_PRESENT_INFO_KHR,
@ -113,7 +113,7 @@ impl<'a> SubmitPresentBuilder<'a> {
pResults: results.as_mut_ptr(),
};
try!(check_errors(vk.QueuePresentKHR(*queue, &infos)));
check_errors(vk.QueuePresentKHR(*queue, &infos))?;
for result in results {
// TODO: AMD driver initially didn't write the results ; check that it's been fixed
@ -158,7 +158,7 @@ impl error::Error for SubmitPresentError {
fn cause(&self) -> Option<&error::Error> {
match *self {
SubmitPresentError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -179,7 +179,7 @@ impl From<Error> for SubmitPresentError {
Error::DeviceLost => SubmitPresentError::DeviceLost,
Error::SurfaceLost => SubmitPresentError::SurfaceLost,
Error::OutOfDate => SubmitPresentError::OutOfDate,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

View File

@ -7,11 +7,11 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::ptr;
use smallvec::SmallVec;
use command_buffer::sys::UnsafeCommandBuffer;
use device::Queue;
@ -19,12 +19,12 @@ use sync::Fence;
use sync::PipelineStages;
use sync::Semaphore;
use check_errors;
use vk;
use Error;
use OomError;
use VulkanObject;
use SynchronizedVulkanObject;
use VulkanObject;
use check_errors;
use vk;
/// Prototype for a submission that executes command buffers.
// TODO: example here
@ -219,7 +219,7 @@ impl<'a> SubmitCommandBufferBuilder<'a> {
pSignalSemaphores: self.signal_semaphores.as_ptr(),
};
try!(check_errors(vk.QueueSubmit(*queue, 1, &batch, self.fence)));
check_errors(vk.QueueSubmit(*queue, 1, &batch, self.fence))?;
Ok(())
}
}
@ -232,10 +232,10 @@ impl<'a> SubmitCommandBufferBuilder<'a> {
// TODO: create multiple batches instead
pub fn merge(mut self, other: Self) -> Self {
assert!(self.fence == 0 || other.fence == 0,
"Can't merge two queue submits that both have a fence");
"Can't merge two queue submits that both have a fence");
self.wait_semaphores.extend(other.wait_semaphores);
self.dest_stages.extend(other.dest_stages); // TODO: meh? will be solved if we submit multiple batches
self.dest_stages.extend(other.dest_stages); // TODO: meh? will be solved if we submit multiple batches
self.signal_semaphores.extend(other.signal_semaphores);
self.command_buffers.extend(other.command_buffers);
@ -271,7 +271,7 @@ impl error::Error for SubmitCommandBufferError {
fn cause(&self) -> Option<&error::Error> {
match *self {
SubmitCommandBufferError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -288,17 +288,18 @@ impl From<Error> for SubmitCommandBufferError {
fn from(err: Error) -> SubmitCommandBufferError {
match err {
err @ Error::OutOfHostMemory => SubmitCommandBufferError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => SubmitCommandBufferError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory =>
SubmitCommandBufferError::OomError(OomError::from(err)),
Error::DeviceLost => SubmitCommandBufferError::DeviceLost,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use super::*;
use std::time::Duration;
use sync::Fence;
#[test]

View File

@ -27,9 +27,7 @@ impl<'a> SubmitSemaphoresWaitBuilder<'a> {
/// Builds a new empty `SubmitSemaphoresWaitBuilder`.
#[inline]
pub fn new() -> SubmitSemaphoresWaitBuilder<'a> {
SubmitSemaphoresWaitBuilder {
semaphores: SmallVec::new(),
}
SubmitSemaphoresWaitBuilder { semaphores: SmallVec::new() }
}
/// Adds an operation that waits on a semaphore.
@ -53,11 +51,12 @@ impl<'a> Into<SubmitCommandBufferBuilder<'a>> for SubmitSemaphoresWaitBuilder<'a
unsafe {
let mut builder = SubmitCommandBufferBuilder::new();
for sem in self.semaphores.drain() {
builder.add_wait_semaphore(sem, PipelineStages {
// TODO: correct stages ; hard
all_commands: true,
.. PipelineStages::none()
});
builder.add_wait_semaphore(sem,
PipelineStages {
// TODO: correct stages ; hard
all_commands: true,
..PipelineStages::none()
});
}
builder
}

File diff suppressed because it is too large Load Diff

View File

@ -7,23 +7,26 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::fmt;
use std::mem;
use std::ops::Range;
use std::ptr;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use smallvec::SmallVec;
use OomError;
use VulkanObject;
use buffer::BufferAccess;
use buffer::BufferInner;
use check_errors;
use command_buffer::CommandBuffer;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::CommandPoolAlloc;
use command_buffer::pool::CommandPoolBuilderAlloc;
use descriptor::descriptor::ShaderStages;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
use format::ClearValue;
@ -33,8 +36,8 @@ use framebuffer::FramebufferAbstract;
use framebuffer::RenderPass;
use framebuffer::RenderPassAbstract;
use framebuffer::Subpass;
use image::ImageLayout;
use image::ImageAccess;
use image::ImageLayout;
use instance::QueueFamily;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
@ -42,11 +45,8 @@ use pipeline::input_assembly::IndexType;
use pipeline::viewport::Scissor;
use pipeline::viewport::Viewport;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::Event;
use OomError;
use VulkanObject;
use check_errors;
use sync::PipelineStages;
use vk;
/// Determines the kind of command buffer that we want to create.
@ -71,24 +71,32 @@ pub enum Kind<R, F> {
},
}
impl Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
impl
Kind<RenderPass<EmptySinglePassRenderPassDesc>,
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
/// Equivalent to `Kind::Primary`.
///
/// > **Note**: If you use `let kind = Kind::Primary;` in your code, you will probably get a
/// > compilation error because the Rust compiler couldn't determine the template parameters
/// > of `Kind`. To solve that problem in an easy way you can use this function instead.
#[inline]
pub fn primary() -> Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
pub fn primary()
-> Kind<RenderPass<EmptySinglePassRenderPassDesc>,
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
{
Kind::Primary
}
/// Equivalent to `Kind::Secondary`.
///
/// > **Note**: If you use `let kind = Kind::Secondary;` in your code, you will probably get a
/// > compilation error because the Rust compiler couldn't determine the template parameters
/// > of `Kind`. To solve that problem in an easy way you can use this function instead.
#[inline]
pub fn secondary() -> Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
pub fn secondary()
-> Kind<RenderPass<EmptySinglePassRenderPassDesc>,
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
{
Kind::Secondary
}
}
@ -155,7 +163,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
/// > **Note**: Some checks are still made with `debug_assert!`. Do not expect to be able to
/// > submit invalid commands.
pub unsafe fn new<Pool, R, F, A>(pool: &Pool, kind: Kind<R, F>, flags: Flags)
-> Result<UnsafeCommandBufferBuilder<P>, OomError>
-> Result<UnsafeCommandBufferBuilder<P>, OomError>
where Pool: CommandPool<Builder = P, Alloc = A>,
P: CommandPoolBuilderAlloc<Alloc = A>,
A: CommandPoolAlloc,
@ -164,11 +172,13 @@ impl<P> UnsafeCommandBufferBuilder<P> {
{
let secondary = match kind {
Kind::Primary => false,
Kind::Secondary | Kind::SecondaryRenderPass { .. } => true,
Kind::Secondary |
Kind::SecondaryRenderPass { .. } => true,
};
let cmd = try!(pool.alloc(secondary, 1)).next().expect("Requested one command buffer from \
the command pool, but got zero.");
let cmd = pool.alloc(secondary, 1)?
.next()
.expect("Requested one command buffer from the command pool, but got zero.");
UnsafeCommandBufferBuilder::already_allocated(cmd, kind, flags)
}
@ -213,7 +223,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
(0, 0)
};
let framebuffer = if let Kind::SecondaryRenderPass { ref subpass, framebuffer: Some(ref framebuffer) } = kind {
let framebuffer = if let Kind::SecondaryRenderPass {
ref subpass,
framebuffer: Some(ref framebuffer),
} = kind
{
// TODO: restore check
//assert!(framebuffer.is_compatible_with(subpass.render_pass())); // TODO: proper error
FramebufferAbstract::inner(&framebuffer).internal_object()
@ -227,9 +241,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
renderPass: rp,
subpass: sp,
framebuffer: framebuffer,
occlusionQueryEnable: 0, // TODO:
queryFlags: 0, // TODO:
pipelineStatistics: 0, // TODO:
occlusionQueryEnable: 0, // TODO:
queryFlags: 0, // TODO:
pipelineStatistics: 0, // TODO:
};
let infos = vk::CommandBufferBeginInfo {
@ -239,14 +253,14 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pInheritanceInfo: &inheritance,
};
try!(check_errors(vk.BeginCommandBuffer(cmd, &infos)));
check_errors(vk.BeginCommandBuffer(cmd, &infos))?;
Ok(UnsafeCommandBufferBuilder {
cmd: Some(alloc),
cmd_raw: cmd,
device: device.clone(),
flags: flags,
})
cmd: Some(alloc),
cmd_raw: cmd,
device: device.clone(),
flags: flags,
})
}
/// Returns the queue family of the builder.
@ -260,21 +274,21 @@ impl<P> UnsafeCommandBufferBuilder<P> {
/// Turns the builder into an actual command buffer.
#[inline]
pub fn build(mut self) -> Result<UnsafeCommandBuffer<P::Alloc>, OomError>
where P: CommandPoolBuilderAlloc
where P: CommandPoolBuilderAlloc
{
unsafe {
let cmd = self.cmd.take().unwrap();
let vk = self.device.pointers();
try!(check_errors(vk.EndCommandBuffer(cmd.inner().internal_object())));
check_errors(vk.EndCommandBuffer(cmd.inner().internal_object()))?;
let cmd_raw = cmd.inner().internal_object();
Ok(UnsafeCommandBuffer {
cmd: cmd.into_alloc(),
cmd_raw: cmd_raw,
device: self.device.clone(),
flags: self.flags,
already_submitted: AtomicBool::new(false),
})
cmd: cmd.into_alloc(),
cmd_raw: cmd_raw,
device: self.device.clone(),
flags: self.flags,
already_submitted: AtomicBool::new(false),
})
}
}
@ -293,41 +307,46 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let raw_render_pass = RenderPassAbstract::inner(&framebuffer).internal_object();
let raw_framebuffer = FramebufferAbstract::inner(&framebuffer).internal_object();
let raw_clear_values: SmallVec<[_; 12]> = clear_values.map(|clear_value| {
match clear_value {
ClearValue::None => {
vk::ClearValue::color(vk::ClearColorValue::float32([0.0; 4]))
},
ClearValue::Float(val) => {
vk::ClearValue::color(vk::ClearColorValue::float32(val))
},
ClearValue::Int(val) => {
vk::ClearValue::color(vk::ClearColorValue::int32(val))
},
ClearValue::Uint(val) => {
vk::ClearValue::color(vk::ClearColorValue::uint32(val))
},
ClearValue::Depth(val) => {
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
depth: val, stencil: 0
})
},
ClearValue::Stencil(val) => {
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
depth: 0.0, stencil: val
})
},
ClearValue::DepthStencil((depth, stencil)) => {
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
depth: depth, stencil: stencil,
})
},
}
}).collect();
let raw_clear_values: SmallVec<[_; 12]> = clear_values
.map(|clear_value| match clear_value {
ClearValue::None => {
vk::ClearValue::color(vk::ClearColorValue::float32([0.0; 4]))
},
ClearValue::Float(val) => {
vk::ClearValue::color(vk::ClearColorValue::float32(val))
},
ClearValue::Int(val) => {
vk::ClearValue::color(vk::ClearColorValue::int32(val))
},
ClearValue::Uint(val) => {
vk::ClearValue::color(vk::ClearColorValue::uint32(val))
},
ClearValue::Depth(val) => {
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
depth: val,
stencil: 0,
})
},
ClearValue::Stencil(val) => {
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
depth: 0.0,
stencil: val,
})
},
ClearValue::DepthStencil((depth, stencil)) => {
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
depth: depth,
stencil: stencil,
})
},
})
.collect();
// TODO: allow customizing
let rect = [0 .. framebuffer.dimensions()[0],
0 .. framebuffer.dimensions()[1]];
let rect = [
0 .. framebuffer.dimensions()[0],
0 .. framebuffer.dimensions()[1],
];
let begin = vk::RenderPassBeginInfo {
sType: vk::STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
@ -347,9 +366,12 @@ impl<P> UnsafeCommandBufferBuilder<P> {
clearValueCount: raw_clear_values.len() as u32,
pClearValues: raw_clear_values.as_ptr(),
};
let contents = if secondary { vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS }
else { vk::SUBPASS_CONTENTS_INLINE };
let contents = if secondary {
vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
} else {
vk::SUBPASS_CONTENTS_INLINE
};
vk.CmdBeginRenderPass(cmd, &begin, contents);
}
@ -364,7 +386,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
sets: S, dynamic_offsets: I)
where Pl: ?Sized + PipelineLayoutAbstract,
S: Iterator<Item = &'s UnsafeDescriptorSet>,
I: Iterator<Item = u32>,
I: Iterator<Item = u32>
{
let vk = self.device().pointers();
let cmd = self.internal_object();
@ -378,12 +400,20 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let num_bindings = sets.len() as u32;
debug_assert!(first_binding + num_bindings <= pipeline_layout.num_sets() as u32);
let bind_point = if graphics { vk::PIPELINE_BIND_POINT_GRAPHICS }
else { vk::PIPELINE_BIND_POINT_COMPUTE };
let bind_point = if graphics {
vk::PIPELINE_BIND_POINT_GRAPHICS
} else {
vk::PIPELINE_BIND_POINT_COMPUTE
};
vk.CmdBindDescriptorSets(cmd, bind_point, pipeline_layout.sys().internal_object(),
first_binding, num_bindings, sets.as_ptr(),
dynamic_offsets.len() as u32, dynamic_offsets.as_ptr());
vk.CmdBindDescriptorSets(cmd,
bind_point,
pipeline_layout.sys().internal_object(),
first_binding,
num_bindings,
sets.as_ptr(),
dynamic_offsets.len() as u32,
dynamic_offsets.as_ptr());
}
/// Calls `vkCmdBindIndexBuffer` on the builder.
@ -398,7 +428,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert!(inner.offset < inner.buffer.size());
debug_assert!(inner.buffer.usage_index_buffer());
vk.CmdBindIndexBuffer(cmd, inner.buffer.internal_object(), inner.offset as vk::DeviceSize,
vk.CmdBindIndexBuffer(cmd,
inner.buffer.internal_object(),
inner.offset as vk::DeviceSize,
index_ty as vk::IndexType);
}
@ -409,7 +441,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
{
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdBindPipeline(cmd, vk::PIPELINE_BIND_POINT_COMPUTE,
vk.CmdBindPipeline(cmd,
vk::PIPELINE_BIND_POINT_COMPUTE,
pipeline.inner().internal_object());
}
@ -430,8 +463,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
/// usage of the command anyway.
#[inline]
pub unsafe fn bind_vertex_buffers(&mut self, first_binding: u32,
params: UnsafeCommandBufferBuilderBindVertexBuffer)
{
params: UnsafeCommandBufferBuilderBindVertexBuffer) {
debug_assert_eq!(params.raw_buffers.len(), params.offsets.len());
if params.raw_buffers.is_empty() {
@ -444,11 +476,17 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let num_bindings = params.raw_buffers.len() as u32;
debug_assert!({
let max_bindings = self.device().physical_device().limits().max_vertex_input_bindings();
first_binding + num_bindings <= max_bindings
});
let max_bindings = self.device()
.physical_device()
.limits()
.max_vertex_input_bindings();
first_binding + num_bindings <= max_bindings
});
vk.CmdBindVertexBuffers(cmd, first_binding, num_bindings, params.raw_buffers.as_ptr(),
vk.CmdBindVertexBuffers(cmd,
first_binding,
num_bindings,
params.raw_buffers.as_ptr(),
params.offsets.as_ptr());
}
@ -495,13 +533,15 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert!(destination.offset < destination.buffer.size());
debug_assert!(destination.buffer.usage_transfer_dest());
let regions: SmallVec<[_; 8]> = regions.map(|(sr, de, sz)| {
vk::BufferCopy {
srcOffset: (sr + source.offset) as vk::DeviceSize,
dstOffset: (de + destination.offset) as vk::DeviceSize,
size: sz as vk::DeviceSize,
}
}).collect();
let regions: SmallVec<[_; 8]> = regions
.map(|(sr, de, sz)| {
vk::BufferCopy {
srcOffset: (sr + source.offset) as vk::DeviceSize,
dstOffset: (de + destination.offset) as vk::DeviceSize,
size: sz as vk::DeviceSize,
}
})
.collect();
if regions.is_empty() {
return;
@ -509,8 +549,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdCopyBuffer(cmd, source.buffer.internal_object(), destination.buffer.internal_object(),
regions.len() as u32, regions.as_ptr());
vk.CmdCopyBuffer(cmd,
source.buffer.internal_object(),
destination.buffer.internal_object(),
regions.len() as u32,
regions.as_ptr());
}
/// Calls `vkCmdCopyBufferToImage` on the builder.
@ -528,29 +571,31 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert!(source.offset < source.buffer.size());
debug_assert!(source.buffer.usage_transfer_src());
let regions: SmallVec<[_; 8]> = regions.map(|copy| {
vk::BufferImageCopy {
bufferOffset: (source.offset + copy.buffer_offset) as vk::DeviceSize,
bufferRowLength: copy.buffer_row_length,
bufferImageHeight: copy.buffer_image_height,
imageSubresource: vk::ImageSubresourceLayers {
aspectMask: copy.image_aspect.to_vk_bits(),
mipLevel: copy.image_mip_level,
baseArrayLayer: copy.image_base_array_layer,
layerCount: copy.image_layer_count,
},
imageOffset: vk::Offset3D {
x: copy.image_offset[0],
y: copy.image_offset[1],
z: copy.image_offset[2],
},
imageExtent: vk::Extent3D {
width: copy.image_extent[0],
height: copy.image_extent[1],
depth: copy.image_extent[2],
},
}
}).collect();
let regions: SmallVec<[_; 8]> = regions
.map(|copy| {
vk::BufferImageCopy {
bufferOffset: (source.offset + copy.buffer_offset) as vk::DeviceSize,
bufferRowLength: copy.buffer_row_length,
bufferImageHeight: copy.buffer_image_height,
imageSubresource: vk::ImageSubresourceLayers {
aspectMask: copy.image_aspect.to_vk_bits(),
mipLevel: copy.image_mip_level,
baseArrayLayer: copy.image_base_array_layer,
layerCount: copy.image_layer_count,
},
imageOffset: vk::Offset3D {
x: copy.image_offset[0],
y: copy.image_offset[1],
z: copy.image_offset[2],
},
imageExtent: vk::Extent3D {
width: copy.image_extent[0],
height: copy.image_extent[1],
depth: copy.image_extent[2],
},
}
})
.collect();
if regions.is_empty() {
return;
@ -559,23 +604,29 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert!(destination.inner().usage_transfer_dest());
debug_assert_eq!(destination.samples(), 1);
debug_assert!(dest_layout == ImageLayout::General ||
dest_layout == ImageLayout::TransferDstOptimal);
dest_layout == ImageLayout::TransferDstOptimal);
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdCopyBufferToImage(cmd, source.buffer.internal_object(),
destination.inner().internal_object(), dest_layout as u32,
regions.len() as u32, regions.as_ptr());
vk.CmdCopyBufferToImage(cmd,
source.buffer.internal_object(),
destination.inner().internal_object(),
dest_layout as u32,
regions.len() as u32,
regions.as_ptr());
}
/// Calls `vkCmdDispatch` on the builder.
#[inline]
pub unsafe fn dispatch(&mut self, dimensions: [u32; 3]) {
debug_assert!({
let max_dims = self.device().physical_device().limits().max_compute_work_group_count();
dimensions[0] <= max_dims[0] && dimensions[1] <= max_dims[1] &&
dimensions[2] <= max_dims[2]
});
let max_dims = self.device()
.physical_device()
.limits()
.max_compute_work_group_count();
dimensions[0] <= max_dims[0] && dimensions[1] <= max_dims[1] &&
dimensions[2] <= max_dims[2]
});
let vk = self.device().pointers();
let cmd = self.internal_object();
@ -595,27 +646,35 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert!(inner.buffer.usage_indirect_buffer());
debug_assert_eq!(inner.offset % 4, 0);
vk.CmdDispatchIndirect(cmd, inner.buffer.internal_object(), inner.offset as vk::DeviceSize);
vk.CmdDispatchIndirect(cmd,
inner.buffer.internal_object(),
inner.offset as vk::DeviceSize);
}
/// Calls `vkCmdDraw` on the builder.
#[inline]
pub unsafe fn draw(&mut self, vertex_count: u32, instance_count: u32, first_vertex: u32,
first_instance: u32)
{
first_instance: u32) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdDraw(cmd, vertex_count, instance_count, first_vertex, first_instance);
vk.CmdDraw(cmd,
vertex_count,
instance_count,
first_vertex,
first_instance);
}
/// Calls `vkCmdDrawIndexed` on the builder.
#[inline]
pub unsafe fn draw_indexed(&mut self, index_count: u32, instance_count: u32, first_index: u32,
vertex_offset: i32, first_instance: u32)
{
pub unsafe fn draw_indexed(&mut self, index_count: u32, instance_count: u32,
first_index: u32, vertex_offset: i32, first_instance: u32) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdDrawIndexed(cmd, index_count, instance_count, first_index, vertex_offset,
vk.CmdDrawIndexed(cmd,
index_count,
instance_count,
first_index,
vertex_offset,
first_instance);
}
@ -627,15 +686,19 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let vk = self.device().pointers();
let cmd = self.internal_object();
debug_assert!(draw_count == 0 || ((stride % 4) == 0) &&
stride as usize >= mem::size_of::<vk::DrawIndirectCommand>());
debug_assert!(draw_count == 0 ||
((stride % 4) == 0) &&
stride as usize >= mem::size_of::<vk::DrawIndirectCommand>());
let inner = buffer.inner();
debug_assert!(inner.offset < buffer.size());
debug_assert!(inner.buffer.usage_indirect_buffer());
vk.CmdDrawIndirect(cmd, inner.buffer.internal_object(), inner.offset as vk::DeviceSize,
draw_count, stride);
vk.CmdDrawIndirect(cmd,
inner.buffer.internal_object(),
inner.offset as vk::DeviceSize,
draw_count,
stride);
}
/// Calls `vkCmdDrawIndexedIndirect` on the builder.
@ -645,13 +708,16 @@ impl<P> UnsafeCommandBufferBuilder<P> {
{
let vk = self.device().pointers();
let cmd = self.internal_object();
let inner = buffer.inner();
debug_assert!(inner.offset < buffer.size());
debug_assert!(inner.buffer.usage_indirect_buffer());
vk.CmdDrawIndexedIndirect(cmd, inner.buffer.internal_object(),
inner.offset as vk::DeviceSize, draw_count, stride);
vk.CmdDrawIndexedIndirect(cmd,
inner.buffer.internal_object(),
inner.offset as vk::DeviceSize,
draw_count,
stride);
}
/// Calls `vkCmdEndRenderPass` on the builder.
@ -688,14 +754,20 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let size = buffer.size();
let (buffer_handle, offset) = {
let BufferInner { buffer: buffer_inner, offset } = buffer.inner();
let BufferInner {
buffer: buffer_inner,
offset,
} = buffer.inner();
debug_assert!(buffer_inner.usage_transfer_dest());
debug_assert_eq!(offset % 4, 0);
(buffer_inner.internal_object(), offset)
};
vk.CmdFillBuffer(cmd, buffer_handle, offset as vk::DeviceSize,
size as vk::DeviceSize, data);
vk.CmdFillBuffer(cmd,
buffer_handle,
offset as vk::DeviceSize,
size as vk::DeviceSize,
data);
}
/// Calls `vkCmdNextSubpass` on the builder.
@ -705,8 +777,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
let vk = self.device().pointers();
let cmd = self.internal_object();
let contents = if secondary { vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS }
else { vk::SUBPASS_CONTENTS_INLINE };
let contents = if secondary {
vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
} else {
vk::SUBPASS_CONTENTS_INLINE
};
vk.CmdNextSubpass(cmd, contents);
}
@ -727,8 +802,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert_ne!(command.src_stage_mask, 0);
debug_assert_ne!(command.dst_stage_mask, 0);
vk.CmdPipelineBarrier(cmd, command.src_stage_mask, command.dst_stage_mask,
command.dependency_flags, command.memory_barriers.len() as u32,
vk.CmdPipelineBarrier(cmd,
command.src_stage_mask,
command.dst_stage_mask,
command.dependency_flags,
command.memory_barriers.len() as u32,
command.memory_barriers.as_ptr(),
command.buffer_barriers.len() as u32,
command.buffer_barriers.as_ptr(),
@ -752,8 +830,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert_eq!(offset % 4, 0);
debug_assert!(mem::size_of_val(data) >= size as usize);
vk.CmdPushConstants(cmd, pipeline_layout.sys().internal_object(),
stages.into(), offset as u32, size as u32,
vk.CmdPushConstants(cmd,
pipeline_layout.sys().internal_object(),
stages.into(),
offset as u32,
size as u32,
data as *const D as *const _);
}
@ -774,7 +855,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn set_blend_constants(&mut self, constants: [f32; 4]) {
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdSetBlendConstants(cmd, constants); // TODO: correct to pass array?
vk.CmdSetBlendConstants(cmd, constants); // TODO: correct to pass array?
}
/// Calls `vkCmdSetDepthBias` on the builder.
@ -851,18 +932,20 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn set_scissor<I>(&mut self, first_scissor: u32, scissors: I)
where I: Iterator<Item = Scissor>
{
let scissors = scissors.map(|v| v.clone().into()).collect::<SmallVec<[_; 16]>>();
let scissors = scissors
.map(|v| v.clone().into())
.collect::<SmallVec<[_; 16]>>();
if scissors.is_empty() {
return;
}
// TODO: missing a debug assert for limits on the actual scissor values
debug_assert!((first_scissor == 0 && scissors.len() == 1) ||
self.device().enabled_features().multi_viewport);
self.device().enabled_features().multi_viewport);
debug_assert!({
let max = self.device().physical_device().limits().max_viewports();
first_scissor + scissors.len() as u32 <= max
});
let max = self.device().physical_device().limits().max_viewports();
first_scissor + scissors.len() as u32 <= max
});
let vk = self.device().pointers();
let cmd = self.internal_object();
@ -876,21 +959,26 @@ impl<P> UnsafeCommandBufferBuilder<P> {
pub unsafe fn set_viewport<I>(&mut self, first_viewport: u32, viewports: I)
where I: Iterator<Item = Viewport>
{
let viewports = viewports.map(|v| v.clone().into()).collect::<SmallVec<[_; 16]>>();
let viewports = viewports
.map(|v| v.clone().into())
.collect::<SmallVec<[_; 16]>>();
if viewports.is_empty() {
return;
}
debug_assert!((first_viewport == 0 && viewports.len() == 1) ||
self.device().enabled_features().multi_viewport);
self.device().enabled_features().multi_viewport);
debug_assert!({
let max = self.device().physical_device().limits().max_viewports();
first_viewport + viewports.len() as u32 <= max
});
let max = self.device().physical_device().limits().max_viewports();
first_viewport + viewports.len() as u32 <= max
});
let vk = self.device().pointers();
let cmd = self.internal_object();
vk.CmdSetViewport(cmd, first_viewport, viewports.len() as u32, viewports.as_ptr());
vk.CmdSetViewport(cmd,
first_viewport,
viewports.len() as u32,
viewports.as_ptr());
}
/// Calls `vkCmdUpdateBuffer` on the builder.
@ -908,13 +996,19 @@ impl<P> UnsafeCommandBufferBuilder<P> {
debug_assert!(size <= mem::size_of_val(data));
let (buffer_handle, offset) = {
let BufferInner { buffer: buffer_inner, offset } = buffer.inner();
let BufferInner {
buffer: buffer_inner,
offset,
} = buffer.inner();
debug_assert!(buffer_inner.usage_transfer_dest());
debug_assert_eq!(offset % 4, 0);
(buffer_inner.internal_object(), offset)
};
vk.CmdUpdateBuffer(cmd, buffer_handle, offset as vk::DeviceSize, size as vk::DeviceSize,
vk.CmdUpdateBuffer(cmd,
buffer_handle,
offset as vk::DeviceSize,
size as vk::DeviceSize,
data as *const D as *const _);
}
}
@ -976,9 +1070,7 @@ impl UnsafeCommandBufferBuilderExecuteCommands {
/// Builds a new empty list.
#[inline]
pub fn new() -> UnsafeCommandBufferBuilderExecuteCommands {
UnsafeCommandBufferBuilderExecuteCommands {
raw_cbs: SmallVec::new(),
}
UnsafeCommandBufferBuilderExecuteCommands { raw_cbs: SmallVec::new() }
}
/// Adds a command buffer to the list.
@ -1002,9 +1094,15 @@ pub struct UnsafeCommandBufferBuilderImageAspect {
impl UnsafeCommandBufferBuilderImageAspect {
pub(crate) fn to_vk_bits(&self) -> vk::ImageAspectFlagBits {
let mut out = 0;
if self.color { out |= vk::IMAGE_ASPECT_COLOR_BIT };
if self.depth { out |= vk::IMAGE_ASPECT_DEPTH_BIT };
if self.stencil { out |= vk::IMAGE_ASPECT_STENCIL_BIT };
if self.color {
out |= vk::IMAGE_ASPECT_COLOR_BIT
};
if self.depth {
out |= vk::IMAGE_ASPECT_DEPTH_BIT
};
if self.stencil {
out |= vk::IMAGE_ASPECT_STENCIL_BIT
};
out
}
}
@ -1072,8 +1170,10 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
self.dst_stage_mask |= other.dst_stage_mask;
self.dependency_flags &= other.dependency_flags;
self.memory_barriers.extend(other.memory_barriers.into_iter());
self.buffer_barriers.extend(other.buffer_barriers.into_iter());
self.memory_barriers
.extend(other.memory_barriers.into_iter());
self.buffer_barriers
.extend(other.buffer_barriers.into_iter());
self.image_barriers.extend(other.image_barriers.into_iter());
}
@ -1087,9 +1187,8 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
/// - There are certain rules regarding the pipeline barriers inside render passes.
///
#[inline]
pub unsafe fn add_execution_dependency(&mut self, source: PipelineStages, dest: PipelineStages,
by_region: bool)
{
pub unsafe fn add_execution_dependency(&mut self, source: PipelineStages,
dest: PipelineStages, by_region: bool) {
if !by_region {
self.dependency_flags = 0;
}
@ -1113,19 +1212,18 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
///
pub unsafe fn add_memory_barrier(&mut self, source_stage: PipelineStages,
source_access: AccessFlagBits, dest_stage: PipelineStages,
dest_access: AccessFlagBits, by_region: bool)
{
dest_access: AccessFlagBits, by_region: bool) {
debug_assert!(source_access.is_compatible_with(&source_stage));
debug_assert!(dest_access.is_compatible_with(&dest_stage));
self.add_execution_dependency(source_stage, dest_stage, by_region);
self.memory_barriers.push(vk::MemoryBarrier {
sType: vk::STRUCTURE_TYPE_MEMORY_BARRIER,
pNext: ptr::null(),
srcAccessMask: source_access.into(),
dstAccessMask: dest_access.into(),
});
sType: vk::STRUCTURE_TYPE_MEMORY_BARRIER,
pNext: ptr::null(),
srcAccessMask: source_access.into(),
dstAccessMask: dest_access.into(),
});
}
/// Adds a buffer memory barrier. This means that all the memory writes to the given buffer by
@ -1143,11 +1241,12 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
/// is added.
/// - Queue ownership transfers must be correct.
///
pub unsafe fn add_buffer_memory_barrier<B>
(&mut self, buffer: &B, source_stage: PipelineStages,
source_access: AccessFlagBits, dest_stage: PipelineStages,
dest_access: AccessFlagBits, by_region: bool,
queue_transfer: Option<(u32, u32)>, offset: usize, size: usize)
pub unsafe fn add_buffer_memory_barrier<B>(&mut self, buffer: &B, source_stage: PipelineStages,
source_access: AccessFlagBits,
dest_stage: PipelineStages,
dest_access: AccessFlagBits, by_region: bool,
queue_transfer: Option<(u32, u32)>, offset: usize,
size: usize)
where B: ?Sized + BufferAccess
{
debug_assert!(source_access.is_compatible_with(&source_stage));
@ -1156,7 +1255,10 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
self.add_execution_dependency(source_stage, dest_stage, by_region);
debug_assert!(size <= buffer.size());
let BufferInner { buffer, offset: org_offset } = buffer.inner();
let BufferInner {
buffer,
offset: org_offset,
} = buffer.inner();
let offset = offset + org_offset;
let (src_queue, dest_queue) = if let Some((src_queue, dest_queue)) = queue_transfer {
@ -1166,16 +1268,16 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
};
self.buffer_barriers.push(vk::BufferMemoryBarrier {
sType: vk::STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
pNext: ptr::null(),
srcAccessMask: source_access.into(),
dstAccessMask: dest_access.into(),
srcQueueFamilyIndex: src_queue,
dstQueueFamilyIndex: dest_queue,
buffer: buffer.internal_object(),
offset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
});
sType: vk::STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
pNext: ptr::null(),
srcAccessMask: source_access.into(),
dstAccessMask: dest_access.into(),
srcQueueFamilyIndex: src_queue,
dstQueueFamilyIndex: dest_queue,
buffer: buffer.internal_object(),
offset: offset as vk::DeviceSize,
size: size as vk::DeviceSize,
});
}
/// Adds an image memory barrier. This is the equivalent of `add_buffer_memory_barrier` but
@ -1196,10 +1298,12 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
/// - Access flags must be compatible with the image usage flags passed at image creation.
///
pub unsafe fn add_image_memory_barrier<I>(&mut self, image: &I, mipmaps: Range<u32>,
layers: Range<u32>, source_stage: PipelineStages, source_access: AccessFlagBits,
dest_stage: PipelineStages, dest_access: AccessFlagBits, by_region: bool,
queue_transfer: Option<(u32, u32)>, current_layout: ImageLayout,
new_layout: ImageLayout)
layers: Range<u32>, source_stage: PipelineStages,
source_access: AccessFlagBits,
dest_stage: PipelineStages,
dest_access: AccessFlagBits, by_region: bool,
queue_transfer: Option<(u32, u32)>,
current_layout: ImageLayout, new_layout: ImageLayout)
where I: ?Sized + ImageAccess
{
debug_assert!(source_access.is_compatible_with(&source_stage));
@ -1234,23 +1338,23 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
};
self.image_barriers.push(vk::ImageMemoryBarrier {
sType: vk::STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
pNext: ptr::null(),
srcAccessMask: source_access.into(),
dstAccessMask: dest_access.into(),
oldLayout: current_layout as u32,
newLayout: new_layout as u32,
srcQueueFamilyIndex: src_queue,
dstQueueFamilyIndex: dest_queue,
image: image.inner().internal_object(),
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmaps.start,
levelCount: mipmaps.end - mipmaps.start,
baseArrayLayer: layers.start,
layerCount: layers.end - layers.start,
},
});
sType: vk::STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
pNext: ptr::null(),
srcAccessMask: source_access.into(),
dstAccessMask: dest_access.into(),
oldLayout: current_layout as u32,
newLayout: new_layout as u32,
srcQueueFamilyIndex: src_queue,
dstQueueFamilyIndex: dest_queue,
image: image.inner().internal_object(),
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmaps.start,
levelCount: mipmaps.end - mipmaps.start,
baseArrayLayer: layers.start,
layerCount: layers.end - layers.start,
},
});
}
}

View File

@ -14,6 +14,8 @@ use std::sync::Mutex;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use SafeDeref;
use VulkanObject;
use buffer::BufferAccess;
use command_buffer::submit::SubmitAnyBuilder;
use command_buffer::submit::SubmitCommandBufferBuilder;
@ -21,18 +23,16 @@ use command_buffer::sys::UnsafeCommandBuffer;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageLayout;
use image::ImageAccess;
use sync::now;
use sync::AccessError;
use image::ImageLayout;
use sync::AccessCheckError;
use sync::AccessError;
use sync::AccessFlagBits;
use sync::FlushError;
use sync::NowFuture;
use sync::GpuFuture;
use sync::NowFuture;
use sync::PipelineStages;
use SafeDeref;
use VulkanObject;
use sync::now;
pub unsafe trait CommandBuffer: DeviceOwned {
/// The command pool of the command buffer.
@ -114,9 +114,11 @@ pub unsafe trait CommandBuffer: DeviceOwned {
#[inline]
fn execute_after<F>(self, future: F, queue: Arc<Queue>)
-> Result<CommandBufferExecFuture<F, Self>, CommandBufferExecError>
where Self: Sized + 'static, F: GpuFuture
where Self: Sized + 'static,
F: GpuFuture
{
assert_eq!(self.device().internal_object(), future.device().internal_object());
assert_eq!(self.device().internal_object(),
future.device().internal_object());
self.prepare_submit(&future, &queue)?;
@ -125,18 +127,19 @@ pub unsafe trait CommandBuffer: DeviceOwned {
}
Ok(CommandBufferExecFuture {
previous: future,
command_buffer: self,
queue: queue,
submitted: Mutex::new(false),
finished: AtomicBool::new(false),
})
previous: future,
command_buffer: self,
queue: queue,
submitted: Mutex::new(false),
finished: AtomicBool::new(false),
})
}
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>;
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>;
// FIXME: lots of other methods
@ -153,7 +156,10 @@ pub unsafe trait CommandBufferBuild {
fn build(self) -> Result<Self::Out, Self::Err>;
}
unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer {
unsafe impl<T> CommandBuffer for T
where T: SafeDeref,
T::Target: CommandBuffer
{
type PoolAlloc = <T::Target as CommandBuffer>::PoolAlloc;
#[inline]
@ -162,21 +168,22 @@ unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer
}
#[inline]
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue)
-> Result<(), CommandBufferExecError> {
(**self).prepare_submit(future, queue)
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
fn check_buffer_access(
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
(**self).check_buffer_access(buffer, exclusive, queue)
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
(**self).check_image_access(image, layout, exclusive, queue)
}
}
@ -184,7 +191,10 @@ unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer
/// Represents a command buffer being executed by the GPU and the moment when the execution
/// finishes.
#[must_use = "Dropping this object will immediately block the thread until the GPU has finished processing the submission"]
pub struct CommandBufferExecFuture<F, Cb> where F: GpuFuture, Cb: CommandBuffer {
pub struct CommandBufferExecFuture<F, Cb>
where F: GpuFuture,
Cb: CommandBuffer
{
previous: F,
command_buffer: Cb,
queue: Arc<Queue>,
@ -196,7 +206,8 @@ pub struct CommandBufferExecFuture<F, Cb> where F: GpuFuture, Cb: CommandBuffer
}
unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
where F: GpuFuture, Cb: CommandBuffer
where F: GpuFuture,
Cb: CommandBuffer
{
#[inline]
fn cleanup_finished(&mut self) {
@ -204,30 +215,31 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
}
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
Ok(match try!(self.previous.build_submission()) {
SubmitAnyBuilder::Empty => {
let mut builder = SubmitCommandBufferBuilder::new();
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)
},
SubmitAnyBuilder::SemaphoresWait(sem) => {
let mut builder: SubmitCommandBufferBuilder = sem.into();
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)
},
SubmitAnyBuilder::CommandBuffer(mut builder) => {
// FIXME: add pipeline barrier
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)
},
SubmitAnyBuilder::QueuePresent(_) | SubmitAnyBuilder::BindSparse(_) => {
unimplemented!() // TODO:
Ok(match self.previous.build_submission()? {
SubmitAnyBuilder::Empty => {
let mut builder = SubmitCommandBufferBuilder::new();
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)
},
SubmitAnyBuilder::SemaphoresWait(sem) => {
let mut builder: SubmitCommandBufferBuilder = sem.into();
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)
},
SubmitAnyBuilder::CommandBuffer(mut builder) => {
// FIXME: add pipeline barrier
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)
},
SubmitAnyBuilder::QueuePresent(_) |
SubmitAnyBuilder::BindSparse(_) => {
unimplemented!() // TODO:
/*present.submit(); // TODO: wrong
let mut builder = SubmitCommandBufferBuilder::new();
builder.add_command_buffer(self.command_buffer.inner());
SubmitAnyBuilder::CommandBuffer(builder)*/
},
})
},
})
}
#[inline]
@ -240,10 +252,10 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
let queue = self.queue.clone();
match try!(self.build_submission()) {
match self.build_submission()? {
SubmitAnyBuilder::Empty => {},
SubmitAnyBuilder::CommandBuffer(builder) => {
try!(builder.submit(&queue));
builder.submit(&queue)?;
},
_ => unreachable!(),
};
@ -271,10 +283,11 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
}
#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
match self.command_buffer.check_buffer_access(buffer, exclusive, queue) {
fn check_buffer_access(
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
match self.command_buffer
.check_buffer_access(buffer, exclusive, queue) {
Ok(v) => Ok(v),
Err(AccessCheckError::Denied(err)) => Err(AccessCheckError::Denied(err)),
Err(AccessCheckError::Unknown) => {
@ -284,21 +297,24 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
}
#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
match self.command_buffer.check_image_access(image, layout, exclusive, queue) {
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
match self.command_buffer
.check_image_access(image, layout, exclusive, queue) {
Ok(v) => Ok(v),
Err(AccessCheckError::Denied(err)) => Err(AccessCheckError::Denied(err)),
Err(AccessCheckError::Unknown) => {
self.previous.check_image_access(image, layout, exclusive, queue)
self.previous
.check_image_access(image, layout, exclusive, queue)
},
}
}
}
unsafe impl<F, Cb> DeviceOwned for CommandBufferExecFuture<F, Cb>
where F: GpuFuture, Cb: CommandBuffer
where F: GpuFuture,
Cb: CommandBuffer
{
#[inline]
fn device(&self) -> &Arc<Device> {
@ -306,7 +322,10 @@ unsafe impl<F, Cb> DeviceOwned for CommandBufferExecFuture<F, Cb>
}
}
impl<F, Cb> Drop for CommandBufferExecFuture<F, Cb> where F: GpuFuture, Cb: CommandBuffer {
impl<F, Cb> Drop for CommandBufferExecFuture<F, Cb>
where F: GpuFuture,
Cb: CommandBuffer
{
fn drop(&mut self) {
unsafe {
if !*self.finished.get_mut() {

View File

@ -11,10 +11,10 @@ use std::cmp;
use std::error;
use std::fmt;
use VulkanObject;
use buffer::BufferAccess;
use device::Device;
use device::DeviceOwned;
use VulkanObject;
/// Checks whether a copy buffer command is valid.
///
@ -28,8 +28,10 @@ pub fn check_copy_buffer<S, D>(device: &Device, source: &S, destination: &D)
where S: ?Sized + BufferAccess,
D: ?Sized + BufferAccess
{
assert_eq!(source.inner().buffer.device().internal_object(), device.internal_object());
assert_eq!(destination.inner().buffer.device().internal_object(), device.internal_object());
assert_eq!(source.inner().buffer.device().internal_object(),
device.internal_object());
assert_eq!(destination.inner().buffer.device().internal_object(),
device.internal_object());
if !source.inner().buffer.usage_transfer_src() {
return Err(CheckCopyBufferError::SourceMissingTransferUsage);

View File

@ -10,10 +10,10 @@
use std::error;
use std::fmt;
use VulkanObject;
use buffer::BufferAccess;
use device::Device;
use device::DeviceOwned;
use VulkanObject;
/// Checks whether a fill buffer command is valid.
///
@ -24,7 +24,8 @@ use VulkanObject;
pub fn check_fill_buffer<B>(device: &Device, buffer: &B) -> Result<(), CheckFillBufferError>
where B: ?Sized + BufferAccess
{
assert_eq!(buffer.inner().buffer.device().internal_object(), device.internal_object());
assert_eq!(buffer.inner().buffer.device().internal_object(),
device.internal_object());
if !buffer.inner().buffer.usage_transfer_dest() {
return Err(CheckFillBufferError::BufferMissingUsage);

View File

@ -9,10 +9,10 @@
//! Functions that check the validity of commands.
pub use self::copy_buffer::{check_copy_buffer, CheckCopyBufferError};
pub use self::dynamic_state::{check_dynamic_state_validity, CheckDynamicStateValidityError};
pub use self::fill_buffer::{check_fill_buffer, CheckFillBufferError};
pub use self::update_buffer::{check_update_buffer, CheckUpdateBufferError};
pub use self::copy_buffer::{CheckCopyBufferError, check_copy_buffer};
pub use self::dynamic_state::{CheckDynamicStateValidityError, check_dynamic_state_validity};
pub use self::fill_buffer::{CheckFillBufferError, check_fill_buffer};
pub use self::update_buffer::{CheckUpdateBufferError, check_update_buffer};
mod copy_buffer;
mod dynamic_state;

View File

@ -12,10 +12,10 @@ use std::error;
use std::fmt;
use std::mem;
use VulkanObject;
use buffer::BufferAccess;
use device::Device;
use device::DeviceOwned;
use VulkanObject;
/// Checks whether an update buffer command is valid.
///
@ -28,7 +28,8 @@ pub fn check_update_buffer<B, D>(device: &Device, buffer: &B, data: &D)
where B: ?Sized + BufferAccess,
D: ?Sized
{
assert_eq!(buffer.inner().buffer.device().internal_object(), device.internal_object());
assert_eq!(buffer.inner().buffer.device().internal_object(),
device.internal_object());
if !buffer.inner().buffer.usage_transfer_dest() {
return Err(CheckUpdateBufferError::BufferMissingUsage);

View File

@ -8,14 +8,14 @@
// according to those terms.
//! Description of a single descriptor.
//!
//!
//! This module contains traits and structs related to describing a single descriptor. A descriptor
//! is a slot where you can bind a buffer or an image so that it can be accessed from your shaders.
//! In order to specify which buffer or image to bind to a descriptor, see the `descriptor_set`
//! module.
//!
//!
//! There are four different kinds of descriptors that give access to buffers:
//!
//!
//! - Uniform texel buffers. Gives read-only access to the content of a buffer. Only supports
//! certain buffer formats.
//! - Storage texel buffers. Gives read and/or write access to the content of a buffer. Only
@ -25,9 +25,9 @@
//! sometimes slower than uniform texel buffers.
//! - Storage buffers. Gives read and/or write access to the content of a buffer. Less restrictive
//! but sometimes slower than uniform buffers and storage texel buffers.
//!
//!
//! There are five different kinds of descriptors related to images:
//!
//!
//! - Storage images. Gives read and/or write access to individual pixels in an image. The image
//! cannot be sampled. In other words, you have exactly specify which pixel to read or write.
//! - Sampled images. Gives read-only access to an image. Before you can use a sampled image in a
@ -36,14 +36,14 @@
//! - Samplers. Doesn't contain an image but a sampler object that describes how an image will be
//! accessed. This is meant to be combined with a sampled image (see above).
//! - Combined image and sampler. Similar to a sampled image, but also directly includes the
//! sampler which indicates how the sampling is done.
//! sampler which indicates how the sampling is done.
//! - Input attachments. The fastest but also most restrictive access to images. Must be integrated
//! in a render pass. Can only give access to the same pixel as the one you're processing.
//!
//!
use format::Format;
use std::cmp;
use std::ops::BitOr;
use format::Format;
use vk;
/// Contains the exact description of a single descriptor.
@ -77,9 +77,8 @@ impl DescriptorDesc {
// TODO: return Result instead of bool
#[inline]
pub fn is_superset_of(&self, other: &DescriptorDesc) -> bool {
self.ty.is_superset_of(&other.ty) &&
self.array_count >= other.array_count && self.stages.is_superset_of(&other.stages) &&
(!self.readonly || other.readonly)
self.ty.is_superset_of(&other.ty) && self.array_count >= other.array_count &&
self.stages.is_superset_of(&other.stages) && (!self.readonly || other.readonly)
}
/// Builds a `DescriptorDesc` that is the union of `self` and `other`, if possible.
@ -89,22 +88,24 @@ impl DescriptorDesc {
// TODO: add example
#[inline]
pub fn union(&self, other: &DescriptorDesc) -> Option<DescriptorDesc> {
if self.ty != other.ty { return None; }
if self.ty != other.ty {
return None;
}
Some(DescriptorDesc {
ty: self.ty.clone(),
array_count: cmp::max(self.array_count, other.array_count),
stages: self.stages | other.stages,
readonly: self.readonly && other.readonly,
})
ty: self.ty.clone(),
array_count: cmp::max(self.array_count, other.array_count),
stages: self.stages | other.stages,
readonly: self.readonly && other.readonly,
})
}
}
/// Describes the content and layout of each array element of a descriptor.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DescriptorDescTy {
Sampler, // TODO: the sampler has some restrictions as well
CombinedImageSampler(DescriptorImageDesc), // TODO: the sampler has some restrictions as well
Sampler, // TODO: the sampler has some restrictions as well
CombinedImageSampler(DescriptorImageDesc), // TODO: the sampler has some restrictions as well
Image(DescriptorImageDesc),
TexelBuffer {
/// If `true`, this describes a storage texel buffer.
@ -131,27 +132,36 @@ impl DescriptorDescTy {
// TODO: add example
pub fn ty(&self) -> Option<DescriptorType> {
Some(match *self {
DescriptorDescTy::Sampler => DescriptorType::Sampler,
DescriptorDescTy::CombinedImageSampler(_) => DescriptorType::CombinedImageSampler,
DescriptorDescTy::Image(ref desc) => {
if desc.sampled { DescriptorType::SampledImage }
else { DescriptorType::StorageImage }
},
DescriptorDescTy::InputAttachment { .. } => DescriptorType::InputAttachment,
DescriptorDescTy::Buffer(ref desc) => {
let dynamic = match desc.dynamic { Some(d) => d, None => return None };
match (desc.storage, dynamic) {
(false, false) => DescriptorType::UniformBuffer,
(true, false) => DescriptorType::StorageBuffer,
(false, true) => DescriptorType::UniformBufferDynamic,
(true, true) => DescriptorType::StorageBufferDynamic,
}
},
DescriptorDescTy::TexelBuffer { storage, .. } => {
if storage { DescriptorType::StorageTexelBuffer }
else { DescriptorType::UniformTexelBuffer }
},
})
DescriptorDescTy::Sampler => DescriptorType::Sampler,
DescriptorDescTy::CombinedImageSampler(_) => DescriptorType::CombinedImageSampler,
DescriptorDescTy::Image(ref desc) => {
if desc.sampled {
DescriptorType::SampledImage
} else {
DescriptorType::StorageImage
}
},
DescriptorDescTy::InputAttachment { .. } => DescriptorType::InputAttachment,
DescriptorDescTy::Buffer(ref desc) => {
let dynamic = match desc.dynamic {
Some(d) => d,
None => return None,
};
match (desc.storage, dynamic) {
(false, false) => DescriptorType::UniformBuffer,
(true, false) => DescriptorType::StorageBuffer,
(false, true) => DescriptorType::UniformBufferDynamic,
(true, true) => DescriptorType::StorageBufferDynamic,
}
},
DescriptorDescTy::TexelBuffer { storage, .. } => {
if storage {
DescriptorType::StorageTexelBuffer
} else {
DescriptorType::UniformTexelBuffer
}
},
})
}
/// Checks whether we are a superset of another descriptor type.
@ -164,14 +174,17 @@ impl DescriptorDescTy {
(&DescriptorDescTy::CombinedImageSampler(ref me),
&DescriptorDescTy::CombinedImageSampler(ref other)) => me.is_superset_of(other),
(&DescriptorDescTy::Image(ref me),
&DescriptorDescTy::Image(ref other)) => me.is_superset_of(other),
(&DescriptorDescTy::Image(ref me), &DescriptorDescTy::Image(ref other)) =>
me.is_superset_of(other),
(&DescriptorDescTy::InputAttachment { multisampled: me_multisampled,
array_layers: me_array_layers },
&DescriptorDescTy::InputAttachment { multisampled: other_multisampled,
array_layers: other_array_layers }) =>
{
(&DescriptorDescTy::InputAttachment {
multisampled: me_multisampled,
array_layers: me_array_layers,
},
&DescriptorDescTy::InputAttachment {
multisampled: other_multisampled,
array_layers: other_array_layers,
}) => {
me_multisampled == other_multisampled && me_array_layers == other_array_layers
},
@ -188,9 +201,14 @@ impl DescriptorDescTy {
}
},
(&DescriptorDescTy::TexelBuffer { storage: me_storage, format: me_format },
&DescriptorDescTy::TexelBuffer { storage: other_storage, format: other_format }) =>
{
(&DescriptorDescTy::TexelBuffer {
storage: me_storage,
format: me_format,
},
&DescriptorDescTy::TexelBuffer {
storage: other_storage,
format: other_format,
}) => {
if me_storage != other_storage {
return false;
}
@ -204,7 +222,7 @@ impl DescriptorDescTy {
},
// Any other combination is invalid.
_ => false
_ => false,
}
}
}
@ -240,7 +258,9 @@ impl DescriptorImageDesc {
}
match (self.format, other.format) {
(Some(a), Some(b)) => if a != b { return false; },
(Some(a), Some(b)) => if a != b {
return false;
},
(Some(_), None) => (),
(None, None) => (),
(None, Some(_)) => return false,
@ -249,16 +269,17 @@ impl DescriptorImageDesc {
match (self.array_layers, other.array_layers) {
(DescriptorImageDescArray::NonArrayed, DescriptorImageDescArray::NonArrayed) => (),
(DescriptorImageDescArray::Arrayed { max_layers: my_max },
DescriptorImageDescArray::Arrayed { max_layers: other_max }) =>
{
DescriptorImageDescArray::Arrayed { max_layers: other_max }) => {
match (my_max, other_max) {
(Some(m), Some(o)) => if m < o { return false; },
(Some(m), Some(o)) => if m < o {
return false;
},
(Some(_), None) => (),
(None, Some(_)) => return false,
(None, None) => (), // TODO: is this correct?
};
},
_ => return false
_ => return false,
};
true
@ -269,7 +290,7 @@ impl DescriptorImageDesc {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum DescriptorImageDescArray {
NonArrayed,
Arrayed { max_layers: Option<u32> }
Arrayed { max_layers: Option<u32> },
}
// TODO: documentation
@ -294,11 +315,10 @@ pub struct DescriptorBufferDesc {
pub enum DescriptorBufferContentDesc {
F32,
F64,
Struct {
},
Struct {},
Array {
len: Box<DescriptorBufferContentDesc>, num_array: usize
len: Box<DescriptorBufferContentDesc>,
num_array: usize,
},
}
@ -401,11 +421,10 @@ impl ShaderStages {
#[inline]
pub fn is_superset_of(&self, other: &ShaderStages) -> bool {
(self.vertex || !other.vertex) &&
(self.tessellation_control || !other.tessellation_control) &&
(self.tessellation_evaluation || !other.tessellation_evaluation) &&
(self.geometry || !other.geometry) &&
(self.fragment || !other.fragment) &&
(self.compute || !other.compute)
(self.tessellation_control || !other.tessellation_control) &&
(self.tessellation_evaluation || !other.tessellation_evaluation) &&
(self.geometry || !other.geometry) && (self.fragment || !other.fragment) &&
(self.compute || !other.compute)
}
/// Checks whether any of the stages in `self` are also present in `other`.
@ -413,11 +432,10 @@ impl ShaderStages {
#[inline]
pub fn intersects(&self, other: &ShaderStages) -> bool {
(self.vertex && other.vertex) ||
(self.tessellation_control && other.tessellation_control) ||
(self.tessellation_evaluation && other.tessellation_evaluation) ||
(self.geometry && other.geometry) ||
(self.fragment && other.fragment) ||
(self.compute && other.compute)
(self.tessellation_control && other.tessellation_control) ||
(self.tessellation_evaluation && other.tessellation_evaluation) ||
(self.geometry && other.geometry) || (self.fragment && other.fragment) ||
(self.compute && other.compute)
}
}
@ -442,12 +460,24 @@ impl Into<vk::ShaderStageFlags> for ShaderStages {
#[inline]
fn into(self) -> vk::ShaderStageFlags {
let mut result = 0;
if self.vertex { result |= vk::SHADER_STAGE_VERTEX_BIT; }
if self.tessellation_control { result |= vk::SHADER_STAGE_TESSELLATION_CONTROL_BIT; }
if self.tessellation_evaluation { result |= vk::SHADER_STAGE_TESSELLATION_EVALUATION_BIT; }
if self.geometry { result |= vk::SHADER_STAGE_GEOMETRY_BIT; }
if self.fragment { result |= vk::SHADER_STAGE_FRAGMENT_BIT; }
if self.compute { result |= vk::SHADER_STAGE_COMPUTE_BIT; }
if self.vertex {
result |= vk::SHADER_STAGE_VERTEX_BIT;
}
if self.tessellation_control {
result |= vk::SHADER_STAGE_TESSELLATION_CONTROL_BIT;
}
if self.tessellation_evaluation {
result |= vk::SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
}
if self.geometry {
result |= vk::SHADER_STAGE_GEOMETRY_BIT;
}
if self.fragment {
result |= vk::SHADER_STAGE_FRAGMENT_BIT;
}
if self.compute {
result |= vk::SHADER_STAGE_COMPUTE_BIT;
}
result
}
}

View File

@ -7,12 +7,12 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::iter;
use buffer::BufferAccess;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use image::ImageAccess;
use std::iter;
/// A collection of descriptor set objects.
pub unsafe trait DescriptorSetsCollection {
@ -74,7 +74,7 @@ unsafe impl<T> DescriptorSetsCollection for T
fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
match set {
0 => Some(self.num_bindings()),
_ => None
_ => None,
}
}
@ -82,7 +82,7 @@ unsafe impl<T> DescriptorSetsCollection for T
fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
match set {
0 => self.descriptor(binding),
_ => None
_ => None,
}
}
@ -185,4 +185,29 @@ macro_rules! impl_collection {
($i:ident) => ();
}
impl_collection!(Z, Y, X, W, V, U, T, S, R, Q, P, O, N, M, L, K, J, I, H, G, F, E, D, C, B, A);
impl_collection!(Z,
Y,
X,
W,
V,
U,
T,
S,
R,
Q,
P,
O,
N,
M,
L,
K,
J,
I,
H,
G,
F,
E,
D,
C,
B,
A);

View File

@ -8,10 +8,10 @@
// according to those terms.
//! Descriptor sets creation and management
//!
//!
//! This module is dedicated to managing descriptor sets. There are three concepts in Vulkan
//! related to descriptor sets:
//!
//!
//! - A `DescriptorSetLayout` is a Vulkan object that describes to the Vulkan implementation the
//! layout of a future descriptor set. When you allocate a descriptor set, you have to pass an
//! instance of this object. This is represented with the `UnsafeDescriptorSetLayout` type in
@ -21,9 +21,9 @@
//! `UnsafeDescriptorPool` type in vulkano.
//! - A `DescriptorSet` contains the bindings to resources and is allocated from a pool. This is
//! represented with the `UnsafeDescriptorSet` type in vulkano.
//!
//!
//! In addition to this, vulkano defines the following:
//!
//!
//! - The `DescriptorPool` trait can be implemented on types from which you can allocate and free
//! descriptor sets. However it is different from Vulkan descriptor pools in the sense that an
//! implementation of the `DescriptorPool` trait can manage multiple Vulkan descriptor pools.
@ -35,15 +35,15 @@
//! - The `DescriptorSetsCollection` trait is implemented on collections of types that implement
//! `DescriptorSet`. It is what you pass to the draw functions.
use SafeDeref;
use buffer::BufferAccess;
use descriptor::descriptor::DescriptorDesc;
use image::ImageAccess;
use SafeDeref;
pub use self::collection::DescriptorSetsCollection;
pub use self::simple::*;
pub use self::std_pool::StdDescriptorPool;
pub use self::std_pool::StdDescriptorPoolAlloc;
pub use self::simple::*;
pub use self::sys::DescriptorPool;
pub use self::sys::DescriptorPoolAlloc;
pub use self::sys::DescriptorPoolAllocError;
@ -77,7 +77,10 @@ pub unsafe trait DescriptorSet: DescriptorSetDesc {
fn images_list<'a>(&'a self) -> Box<Iterator<Item = &'a ImageAccess> + 'a>;
}
unsafe impl<T> DescriptorSet for T where T: SafeDeref, T::Target: DescriptorSet {
unsafe impl<T> DescriptorSet for T
where T: SafeDeref,
T::Target: DescriptorSet
{
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
(**self).inner()
@ -103,7 +106,10 @@ pub unsafe trait DescriptorSetDesc {
fn descriptor(&self, binding: usize) -> Option<DescriptorDesc>;
}
unsafe impl<T> DescriptorSetDesc for T where T: SafeDeref, T::Target: DescriptorSetDesc {
unsafe impl<T> DescriptorSetDesc for T
where T: SafeDeref,
T::Target: DescriptorSetDesc
{
#[inline]
fn num_bindings(&self) -> usize {
(**self).num_bindings()

View File

@ -1,98 +1,106 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use std::sync::Arc;
use buffer::BufferAccess;
use buffer::BufferViewRef;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorType;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::DescriptorWrite;
use descriptor::descriptor_set::StdDescriptorPool;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
use image::ImageAccess;
use image::ImageLayout;
use image::ImageViewAccess;
use sampler::Sampler;
use sync::AccessFlagBits;
use sync::PipelineStages;
/// A simple immutable descriptor set.
///
/// It is named "simple" because creating such a descriptor set allocates from a pool, and because
/// it can't be modified once created. It is sufficient for most usages, but in some situations
/// you may wish to use something more optimized instead.
///
/// In order to build a `SimpleDescriptorSet`, you need to use a `SimpleDescriptorSetBuilder`. But
/// the easiest way is to use the `simple_descriptor_set!` macro.
///
/// The template parameter of the `SimpleDescriptorSet` is very complex, and you shouldn't try to
/// express it explicitely. If you want to store your descriptor set in a struct or in a `Vec` for
/// example, you are encouraged to turn `SimpleDescriptorSet` into a `Box<DescriptorSet>` or a
/// `Arc<DescriptorSet>`.
///
/// # Example
// TODO:
pub struct SimpleDescriptorSet<R, P = Arc<StdDescriptorPool>> where P: DescriptorPool {
inner: P::Alloc,
resources: R,
layout: Arc<UnsafeDescriptorSetLayout>
}
impl<R, P> SimpleDescriptorSet<R, P> where P: DescriptorPool {
/// Returns the layout used to create this descriptor set.
#[inline]
pub fn set_layout(&self) -> &Arc<UnsafeDescriptorSetLayout> {
&self.layout
}
}
unsafe impl<R, P> DescriptorSet for SimpleDescriptorSet<R, P> where P: DescriptorPool {
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
self.inner.inner()
}
#[inline]
fn buffers_list<'a>(&'a self) -> Box<Iterator<Item = &'a BufferAccess> + 'a> {
unimplemented!()
}
#[inline]
fn images_list<'a>(&'a self) -> Box<Iterator<Item = &'a ImageAccess> + 'a> {
unimplemented!()
}
}
unsafe impl<R, P> DescriptorSetDesc for SimpleDescriptorSet<R, P> where P: DescriptorPool {
#[inline]
fn num_bindings(&self) -> usize {
unimplemented!() // FIXME:
}
#[inline]
fn descriptor(&self, binding: usize) -> Option<DescriptorDesc> {
unimplemented!() // FIXME:
}
}
/// Builds a descriptor set in the form of a `SimpleDescriptorSet` object.
// TODO: more doc
#[macro_export]
use buffer::BufferAccess;
use buffer::BufferViewRef;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorType;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::DescriptorWrite;
use descriptor::descriptor_set::StdDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
use image::ImageAccess;
use image::ImageLayout;
use image::ImageViewAccess;
use sampler::Sampler;
use sync::AccessFlagBits;
use sync::PipelineStages;
/// A simple immutable descriptor set.
///
/// It is named "simple" because creating such a descriptor set allocates from a pool, and because
/// it can't be modified once created. It is sufficient for most usages, but in some situations
/// you may wish to use something more optimized instead.
///
/// In order to build a `SimpleDescriptorSet`, you need to use a `SimpleDescriptorSetBuilder`. But
/// the easiest way is to use the `simple_descriptor_set!` macro.
///
/// The template parameter of the `SimpleDescriptorSet` is very complex, and you shouldn't try to
/// express it explicitely. If you want to store your descriptor set in a struct or in a `Vec` for
/// example, you are encouraged to turn `SimpleDescriptorSet` into a `Box<DescriptorSet>` or a
/// `Arc<DescriptorSet>`.
///
/// # Example
// TODO:
pub struct SimpleDescriptorSet<R, P = Arc<StdDescriptorPool>>
where P: DescriptorPool
{
inner: P::Alloc,
resources: R,
layout: Arc<UnsafeDescriptorSetLayout>,
}
impl<R, P> SimpleDescriptorSet<R, P>
where P: DescriptorPool
{
/// Returns the layout used to create this descriptor set.
#[inline]
pub fn set_layout(&self) -> &Arc<UnsafeDescriptorSetLayout> {
&self.layout
}
}
unsafe impl<R, P> DescriptorSet for SimpleDescriptorSet<R, P>
where P: DescriptorPool
{
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
self.inner.inner()
}
#[inline]
fn buffers_list<'a>(&'a self) -> Box<Iterator<Item = &'a BufferAccess> + 'a> {
unimplemented!()
}
#[inline]
fn images_list<'a>(&'a self) -> Box<Iterator<Item = &'a ImageAccess> + 'a> {
unimplemented!()
}
}
unsafe impl<R, P> DescriptorSetDesc for SimpleDescriptorSet<R, P>
where P: DescriptorPool
{
#[inline]
fn num_bindings(&self) -> usize {
unimplemented!() // FIXME:
}
#[inline]
fn descriptor(&self, binding: usize) -> Option<DescriptorDesc> {
unimplemented!() // FIXME:
}
}
/// Builds a descriptor set in the form of a `SimpleDescriptorSet` object.
// TODO: more doc
#[macro_export]
macro_rules! simple_descriptor_set {
($layout:expr, $set_num:expr, {$($name:ident: $val:expr),*$(,)*}) => ({
#[allow(unused_imports)]
@ -122,267 +130,283 @@ macro_rules! simple_descriptor_set {
builder.build()
});
}
/// Prototype of a `SimpleDescriptorSet`.
///
/// > **Note**: You are encouraged to use the `simple_descriptor_set!` macro instead of
/// > manipulating these internals.
///
/// The template parameter `L` is the pipeline layout to use, and the template parameter `R` is
/// a complex unspecified type that represents the list of resources.
///
/// # Example
// TODO: example here
pub struct SimpleDescriptorSetBuilder<L, R> {
// The pipeline layout.
layout: L,
// Id of the set within the pipeline layout.
set_id: usize,
// The writes to perform on a descriptor set in order to put the resources in it.
writes: Vec<DescriptorWrite>,
// Holds the resources alive.
resources: R,
}
impl<L> SimpleDescriptorSetBuilder<L, ()> where L: PipelineLayoutAbstract {
/// Builds a new prototype for a `SimpleDescriptorSet`. Requires a reference to a pipeline
/// layout, and the id of the set within the layout.
///
/// # Panic
///
/// - Panics if the set id is out of range.
///
pub fn new(layout: L, set_id: usize) -> SimpleDescriptorSetBuilder<L, ()> {
assert!(layout.num_sets() > set_id);
let cap = layout.num_bindings_in_set(set_id).unwrap_or(0);
SimpleDescriptorSetBuilder {
layout: layout,
set_id: set_id,
writes: Vec::with_capacity(cap),
resources: (),
}
}
}
impl<L, R> SimpleDescriptorSetBuilder<L, R> where L: PipelineLayoutAbstract {
/// Builds a `SimpleDescriptorSet` from the builder.
pub fn build(self) -> SimpleDescriptorSet<R, Arc<StdDescriptorPool>> {
// TODO: check that we filled everything
let pool = Device::standard_descriptor_pool(self.layout.device());
let set_layout = self.layout.descriptor_set_layout(self.set_id).unwrap().clone(); // FIXME: error
let set = unsafe {
let mut set = pool.alloc(&set_layout).unwrap(); // FIXME: error
set.inner_mut().write(pool.device(), self.writes.into_iter());
set
};
SimpleDescriptorSet {
inner: set,
resources: self.resources,
layout: set_layout,
}
}
}
/// Trait implemented on buffers so that they can be appended to a simple descriptor set builder.
pub unsafe trait SimpleDescriptorSetBufferExt<L, R> {
/// The new type of the template parameter `R` of the builder.
type Out;
/// Appends the buffer to the `SimpleDescriptorSetBuilder`.
// TODO: return Result
fn add_me(self, i: SimpleDescriptorSetBuilder<L, R>, name: &str)
-> SimpleDescriptorSetBuilder<L, Self::Out>;
}
unsafe impl<L, R, T> SimpleDescriptorSetBufferExt<L, R> for T
where T: BufferAccess, L: PipelineLayoutAbstract
{
type Out = (R, SimpleDescriptorSetBuf<T>);
fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
-> SimpleDescriptorSetBuilder<L, Self::Out>
{
let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
assert_eq!(set_id, i.set_id); // TODO: Result instead
let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
assert!(desc.array_count == 1); // not implemented
}
/// Prototype of a `SimpleDescriptorSet`.
///
/// > **Note**: You are encouraged to use the `simple_descriptor_set!` macro instead of
/// > manipulating these internals.
///
/// The template parameter `L` is the pipeline layout to use, and the template parameter `R` is
/// a complex unspecified type that represents the list of resources.
///
/// # Example
// TODO: example here
pub struct SimpleDescriptorSetBuilder<L, R> {
// The pipeline layout.
layout: L,
// Id of the set within the pipeline layout.
set_id: usize,
// The writes to perform on a descriptor set in order to put the resources in it.
writes: Vec<DescriptorWrite>,
// Holds the resources alive.
resources: R,
}
impl<L> SimpleDescriptorSetBuilder<L, ()>
where L: PipelineLayoutAbstract
{
/// Builds a new prototype for a `SimpleDescriptorSet`. Requires a reference to a pipeline
/// layout, and the id of the set within the layout.
///
/// # Panic
///
/// - Panics if the set id is out of range.
///
pub fn new(layout: L, set_id: usize) -> SimpleDescriptorSetBuilder<L, ()> {
assert!(layout.num_sets() > set_id);
let cap = layout.num_bindings_in_set(set_id).unwrap_or(0);
SimpleDescriptorSetBuilder {
layout: layout,
set_id: set_id,
writes: Vec::with_capacity(cap),
resources: (),
}
}
}
impl<L, R> SimpleDescriptorSetBuilder<L, R>
    where L: PipelineLayoutAbstract
{
    /// Builds a `SimpleDescriptorSet` from the builder.
    ///
    /// Allocates a set from the device's standard descriptor pool, then applies all the
    /// accumulated `DescriptorWrite`s to it.
    pub fn build(self) -> SimpleDescriptorSet<R, Arc<StdDescriptorPool>> {
        // TODO: check that we filled everything
        let pool = Device::standard_descriptor_pool(self.layout.device());
        let set_layout = self.layout
            .descriptor_set_layout(self.set_id)
            .unwrap()
            .clone(); // FIXME: error
        let set = unsafe {
            let mut set = pool.alloc(&set_layout).unwrap(); // FIXME: error
            // Apply every pending write at once; the set is not visible to anyone else yet.
            set.inner_mut()
                .write(pool.device(), self.writes.into_iter());
            set
        };
        SimpleDescriptorSet {
            inner: set,
            // Keeps the bound buffers/images alive as long as the set exists.
            resources: self.resources,
            layout: set_layout,
        }
    }
}
/// Trait implemented on buffers so that they can be appended to a simple descriptor set builder.
// NOTE(review): marked `unsafe` — presumably implementations must produce writes consistent
// with the resources they store; confirm against the trait's users.
pub unsafe trait SimpleDescriptorSetBufferExt<L, R> {
    /// The new type of the template parameter `R` of the builder.
    type Out;

    /// Appends the buffer to the `SimpleDescriptorSetBuilder`.
    // TODO: return Result
    fn add_me(self, i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out>;
}
// Any buffer access can be appended to the builder as a uniform or storage buffer descriptor,
// depending on what the layout declares for the named binding.
unsafe impl<L, R, T> SimpleDescriptorSetBufferExt<L, R> for T
    where T: BufferAccess,
          L: PipelineLayoutAbstract
{
    type Out = (R, SimpleDescriptorSetBuf<T>);

    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out> {
        // Resolve the named binding within the builder's set.
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        assert!(desc.array_count == 1); // not implemented

        // Record the descriptor write matching the binding's declared type.
        i.writes.push(match desc.ty.ty().unwrap() {
            DescriptorType::UniformBuffer => unsafe {
                DescriptorWrite::uniform_buffer(binding_id as u32, 0, &self)
            },
            DescriptorType::StorageBuffer => unsafe {
                DescriptorWrite::storage_buffer(binding_id as u32, 0, &self)
            },
            _ => panic!()
        });

        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            // The buffer itself is moved into the resources tuple to keep it alive.
            resources: (i.resources, SimpleDescriptorSetBuf {
                buffer: self,
                write: !desc.readonly,
                stage: PipelineStages::none(), // FIXME:
                access: AccessFlagBits::none(), // FIXME:
            })
        }
    }
}
/// Trait implemented on images so that they can be appended to a simple descriptor set builder.
pub unsafe trait SimpleDescriptorSetImageExt<L, R> {
    /// The new type of the template parameter `R` of the builder.
    type Out;

    /// Appends the image to the `SimpleDescriptorSetBuilder`.
    // TODO: return Result
    fn add_me(self, i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out>;
}
// NOTE(review): this fragment appears to be garbled diff residue — the `i.writes.push(match
// desc.ty.ty().unwrap() {` opener is missing before the match arms below, the arms are buffer
// arms in an image impl, and the resources field uses `SimpleDescriptorSetBuf` while `Out` is
// `SimpleDescriptorSetImg`. The well-formed version of this impl appears further down in the
// file; this copy will not compile as-is — restore from the clean version.
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for T
    where T: ImageViewAccess, L: PipelineLayoutAbstract
{
    type Out = (R, SimpleDescriptorSetImg<T>);
    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out>
    {
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        assert!(desc.array_count == 1); // not implemented
        // NOTE(review): orphan match arms start here — enclosing `i.writes.push(match …` is lost.
        DescriptorType::UniformBuffer => unsafe {
            DescriptorWrite::uniform_buffer(binding_id as u32, 0, &self)
        },
        DescriptorType::StorageBuffer => unsafe {
            DescriptorWrite::storage_buffer(binding_id as u32, 0, &self)
        },
        _ => panic!(),
        });
        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            resources: (i.resources,
                        SimpleDescriptorSetBuf {
                            buffer: self,
                            write: !desc.readonly,
                            stage: PipelineStages::none(), // FIXME:
                            access: AccessFlagBits::none(), // FIXME:
                        }),
        }
    }
}
/// Trait implemented on images so that they can be appended to a simple descriptor set builder.
// NOTE(review): this is a second, identical declaration of `SimpleDescriptorSetImageExt` —
// apparently diff residue; only one declaration can remain in the final file.
pub unsafe trait SimpleDescriptorSetImageExt<L, R> {
    /// The new type of the template parameter `R` of the builder.
    type Out;

    /// Appends the image to the `SimpleDescriptorSetBuilder`.
    // TODO: return Result
    fn add_me(self, i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out>;
}
// Any image view can be appended to the builder; the kind of descriptor write is chosen from
// what the layout declares for the named binding (sampled/storage image or input attachment).
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for T
    where T: ImageViewAccess,
          L: PipelineLayoutAbstract
{
    type Out = (R, SimpleDescriptorSetImg<T>);

    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out> {
        // Resolve the named binding within the builder's set.
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        assert!(desc.array_count == 1); // not implemented

        i.writes.push(match desc.ty.ty().unwrap() {
            DescriptorType::SampledImage => {
                DescriptorWrite::sampled_image(binding_id as u32, 0, &self)
            },
            DescriptorType::StorageImage => {
                DescriptorWrite::storage_image(binding_id as u32, 0, &self)
            },
            DescriptorType::InputAttachment => {
                DescriptorWrite::input_attachment(binding_id as u32, 0, &self)
            },
            _ => panic!()
        });

        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            // No sampler: this path is for image-only descriptors.
            resources: (i.resources, SimpleDescriptorSetImg {
                image: self,
                sampler: None,
                write: !desc.readonly,
                first_mipmap: 0, // FIXME:
                num_mipmaps: 1, // FIXME:
                first_layer: 0, // FIXME:
                num_layers: 1, // FIXME:
                layout: ImageLayout::General, // FIXME:
                stage: PipelineStages::none(), // FIXME:
                access: AccessFlagBits::none(), // FIXME:
            })
        }
    }
}
// NOTE(review): garbled diff residue — the `i.writes.push(match …` opener is missing before the
// match arms, the arms reference `&self` where the clean version uses `&image_view`, and the
// resources field sets `image: self` / `sampler: None` although this impl is for an
// (image, sampler) pair. The well-formed version appears immediately below; this copy will not
// compile as-is — restore from the clean version.
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for (T, Arc<Sampler>)
    where T: ImageViewAccess, L: PipelineLayoutAbstract
{
    type Out = (R, SimpleDescriptorSetImg<T>);
    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out>
    {
        let image_view = self.0;
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        assert!(desc.array_count == 1); // not implemented
        // NOTE(review): orphan match arms start here — enclosing `i.writes.push(match …` is lost.
        DescriptorType::SampledImage => {
            DescriptorWrite::sampled_image(binding_id as u32, 0, &self)
        },
        DescriptorType::StorageImage => {
            DescriptorWrite::storage_image(binding_id as u32, 0, &self)
        },
        DescriptorType::InputAttachment => {
            DescriptorWrite::input_attachment(binding_id as u32, 0, &self)
        },
        _ => panic!(),
        });
        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            resources: (i.resources,
                        SimpleDescriptorSetImg {
                            image: self,
                            sampler: None,
                            write: !desc.readonly,
                            first_mipmap: 0, // FIXME:
                            num_mipmaps: 1, // FIXME:
                            first_layer: 0, // FIXME:
                            num_layers: 1, // FIXME:
                            layout: ImageLayout::General, // FIXME:
                            stage: PipelineStages::none(), // FIXME:
                            access: AccessFlagBits::none(), // FIXME:
                        }),
        }
    }
}
// An (image view, sampler) pair maps to a combined-image-sampler descriptor.
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for (T, Arc<Sampler>)
    where T: ImageViewAccess,
          L: PipelineLayoutAbstract
{
    type Out = (R, SimpleDescriptorSetImg<T>);

    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out> {
        let image_view = self.0;

        // Resolve the named binding within the builder's set.
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        assert!(desc.array_count == 1); // not implemented

        // Only combined-image-sampler bindings accept an (image, sampler) pair.
        i.writes.push(match desc.ty.ty().unwrap() {
            DescriptorType::CombinedImageSampler => {
                DescriptorWrite::combined_image_sampler(binding_id as u32, 0, &self.1, &image_view)
            },
            _ => panic!()
        });

        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            // Both the view and the sampler are stored to keep them alive.
            resources: (i.resources, SimpleDescriptorSetImg {
                image: image_view,
                sampler: Some(self.1),
                write: !desc.readonly,
                first_mipmap: 0, // FIXME:
                num_mipmaps: 1, // FIXME:
                first_layer: 0, // FIXME:
                num_layers: 1, // FIXME:
                layout: ImageLayout::General, // FIXME:
                stage: PipelineStages::none(), // FIXME:
                access: AccessFlagBits::none(), // FIXME:
            })
        }
    }
}
// TODO: DRY
// NOTE(review): garbled diff residue — the body of the `for` loop is missing its
// `i.writes.push(match …` opener, the arms reference `self.1`/`image_view` (which belong to the
// tuple impl, not this Vec impl), and the returned resources use a single
// `SimpleDescriptorSetImg` where `Out` is `Vec<SimpleDescriptorSetImg<T>>`. The well-formed
// version appears immediately below; this copy will not compile as-is.
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for Vec<(T, Arc<Sampler>)>
    where T: ImageViewAccess, L: PipelineLayoutAbstract
{
    type Out = (R, Vec<SimpleDescriptorSetImg<T>>);
    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out>
    {
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        assert_eq!(desc.array_count as usize, self.len()); // not implemented
        let mut imgs = Vec::new();
        for (num, (img, sampler)) in self.into_iter().enumerate() {
        // NOTE(review): orphan match arms start here — enclosing `i.writes.push(match …` is lost.
        DescriptorType::CombinedImageSampler => {
            DescriptorWrite::combined_image_sampler(binding_id as u32,
                                                    0,
                                                    &self.1,
                                                    &image_view)
        },
        _ => panic!(),
        });
        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            resources: (i.resources,
                        SimpleDescriptorSetImg {
                            image: image_view,
                            sampler: Some(self.1),
                            write: !desc.readonly,
                            first_mipmap: 0, // FIXME:
                            num_mipmaps: 1, // FIXME:
                            first_layer: 0, // FIXME:
                            num_layers: 1, // FIXME:
                            layout: ImageLayout::General, // FIXME:
                            stage: PipelineStages::none(), // FIXME:
                            access: AccessFlagBits::none(), // FIXME:
                        }),
        }
    }
}
// TODO: DRY
// An array binding: a Vec of (image view, sampler) pairs fills every array element of a
// combined-image-sampler binding, one write per element.
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for Vec<(T, Arc<Sampler>)>
    where T: ImageViewAccess,
          L: PipelineLayoutAbstract
{
    type Out = (R, Vec<SimpleDescriptorSetImg<T>>);

    fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
              -> SimpleDescriptorSetBuilder<L, Self::Out> {
        // Resolve the named binding within the builder's set.
        let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
        assert_eq!(set_id, i.set_id); // TODO: Result instead
        let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
        // The Vec must supply exactly one element per array slot of the binding.
        assert_eq!(desc.array_count as usize, self.len()); // not implemented

        let mut imgs = Vec::new();
        for (num, (img, sampler)) in self.into_iter().enumerate() {
            // `num` is the array element index within the binding.
            i.writes.push(match desc.ty.ty().unwrap() {
                DescriptorType::CombinedImageSampler => {
                    DescriptorWrite::combined_image_sampler(binding_id as u32, num as u32,
                                                            &sampler, &img)
                },
                _ => panic!()
            });

            imgs.push(SimpleDescriptorSetImg {
                image: img,
                sampler: Some(sampler),
                write: !desc.readonly,
                first_mipmap: 0, // FIXME:
                num_mipmaps: 1, // FIXME:
                first_layer: 0, // FIXME:
                num_layers: 1, // FIXME:
                layout: ImageLayout::General, // FIXME:
                stage: PipelineStages::none(), // FIXME:
                access: AccessFlagBits::none(), // FIXME:
            });
        }

        SimpleDescriptorSetBuilder {
            layout: i.layout,
            set_id: i.set_id,
            writes: i.writes,
            resources: (i.resources, imgs),
        }
    }
}
DescriptorType::CombinedImageSampler => {
DescriptorWrite::combined_image_sampler(binding_id as u32,
num as u32,
&sampler,
&img)
},
_ => panic!(),
});
imgs.push(SimpleDescriptorSetImg {
image: img,
sampler: Some(sampler),
write: !desc.readonly,
first_mipmap: 0, // FIXME:
num_mipmaps: 1, // FIXME:
first_layer: 0, // FIXME:
num_layers: 1, // FIXME:
layout: ImageLayout::General, // FIXME:
stage: PipelineStages::none(), // FIXME:
access: AccessFlagBits::none(), // FIXME:
});
}
SimpleDescriptorSetBuilder {
layout: i.layout,
set_id: i.set_id,
writes: i.writes,
resources: (i.resources, imgs),
}
}
}
/*
/// Internal trait related to the `SimpleDescriptorSet` system.
pub unsafe trait SimpleDescriptorSetResourcesCollection {
@ -394,16 +418,16 @@ unsafe impl SimpleDescriptorSetResourcesCollection for () {
#[inline]
fn add_transition<'a>(&'a self, _: &mut CommandsListSink<'a>) {
}
}*/
/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetBuf<B> {
    // The buffer bound to the descriptor; stored to keep it alive.
    buffer: B,
    // True when the shader may write the descriptor (i.e. the binding is not read-only).
    write: bool,
    // Pipeline stages for the transition; currently always none() (see FIXMEs at construction).
    stage: PipelineStages,
    // Access flags for the transition; currently always none() (see FIXMEs at construction).
    access: AccessFlagBits,
}
}*/
/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetBuf<B> {
buffer: B,
write: bool,
stage: PipelineStages,
access: AccessFlagBits,
}
/*unsafe impl<B> SimpleDescriptorSetResourcesCollection for SimpleDescriptorSetBuf<B>
where B: BufferAccess
{
@ -425,16 +449,18 @@ pub struct SimpleDescriptorSetBuf<B> {
sink.add_buffer_transition(&self.buffer, 0, self.buffer.size(), self.write, stages, access);
}
}*/
/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetBufView<V> where V: BufferViewRef {
    // The buffer view bound to the descriptor; stored to keep it alive.
    view: V,
    // True when the shader may write the descriptor.
    write: bool,
    // Pipeline stages for the transition — TODO confirm against the commented-out impl below.
    stage: PipelineStages,
    // Access flags for the transition — TODO confirm against the commented-out impl below.
    access: AccessFlagBits,
}
}*/
/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetBufView<V>
where V: BufferViewRef
{
view: V,
write: bool,
stage: PipelineStages,
access: AccessFlagBits,
}
/*unsafe impl<V> SimpleDescriptorSetResourcesCollection for SimpleDescriptorSetBufView<V>
where V: BufferViewRef, V::BufferAccess: BufferAccess
{
@ -457,22 +483,22 @@ pub struct SimpleDescriptorSetBufView<V> where V: BufferViewRef {
sink.add_buffer_transition(self.view.view().buffer(), 0, self.view.view().buffer().size(),
self.write, stages, access);
}
}*/
/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetImg<I> {
    // The image view bound to the descriptor; stored to keep it alive.
    image: I,
    // Sampler paired with the image for combined-image-sampler bindings; `None` otherwise.
    sampler: Option<Arc<Sampler>>,
    // True when the shader may write the descriptor (i.e. the binding is not read-only).
    write: bool,
    // Mipmap/layer range covered by the binding; currently hard-coded to the first mip/layer
    // at the construction sites (see FIXMEs there).
    first_mipmap: u32,
    num_mipmaps: u32,
    first_layer: u32,
    num_layers: u32,
    // Image layout expected by the descriptor; currently always General (see FIXMEs).
    layout: ImageLayout,
    // Pipeline stages / access flags for the transition; currently always none() (see FIXMEs).
    stage: PipelineStages,
    access: AccessFlagBits,
}
}*/
/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetImg<I> {
image: I,
sampler: Option<Arc<Sampler>>,
write: bool,
first_mipmap: u32,
num_mipmaps: u32,
first_layer: u32,
num_layers: u32,
layout: ImageLayout,
stage: PipelineStages,
access: AccessFlagBits,
}
/*unsafe impl<I> SimpleDescriptorSetResourcesCollection for SimpleDescriptorSetImg<I>
where I: ImageViewAccess
{
@ -509,4 +535,4 @@ unsafe impl<A, B> SimpleDescriptorSetResourcesCollection for (A, B)
self.0.add_transition(sink);
self.1.add_transition(sink);
}
}*/
}*/

View File

@ -1,172 +1,172 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::Arc;
use std::sync::Mutex;
use device::Device;
use device::DeviceOwned;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::DescriptorPoolAllocError;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use OomError;
/// Standard implementation of a descriptor pool.
///
/// Whenever a set is allocated, this implementation will try to find a pool that has some space
/// for it. If there is one, allocate from it. If there is none, create a new pool whose capacity
/// is 40 sets and 40 times the requested descriptors. This number is arbitrary.
pub struct StdDescriptorPool {
    // Device the underlying Vulkan pools are created on.
    device: Arc<Device>,
    // List of inner pools. Each is behind its own Arc<Mutex> because allocations keep a
    // reference to their pool in order to return capacity to it on drop.
    pools: Mutex<Vec<Arc<Mutex<Pool>>>>,
}
// One inner pool, plus the bookkeeping needed to know whether it can serve an allocation.
struct Pool {
    // The raw pool allocations are made from.
    pool: UnsafeDescriptorPool,
    // Descriptors still available in this pool; decreased on alloc, restored on free.
    remaining_capacity: DescriptorsCount,
    // Sets still available in this pool; decreased on alloc, restored on free.
    remaining_sets_count: u32,
}
impl StdDescriptorPool {
    /// Builds a new `StdDescriptorPool`.
    ///
    /// No inner pool is created up front; pools are created lazily on first allocation.
    pub fn new(device: Arc<Device>) -> StdDescriptorPool {
        StdDescriptorPool {
            device: device,
            pools: Mutex::new(Vec::new()),
        }
    }
}
/// A descriptor set allocated from a `StdDescriptorPool`.
pub struct StdDescriptorPoolAlloc {
    // Pool the set came from; the Drop impl frees the set back into it.
    pool: Arc<Mutex<Pool>>,
    // The set. Inside an option so that we can extract it in the destructor.
    set: Option<UnsafeDescriptorSet>,
    // We need to keep track of this count in order to add it back to the capacity when freeing.
    descriptors: DescriptorsCount,
}
unsafe impl DescriptorPool for Arc<StdDescriptorPool> {
type Alloc = StdDescriptorPoolAlloc;
// TODO: eventually use a lock-free algorithm?
fn alloc(&self, layout: &UnsafeDescriptorSetLayout)
-> Result<StdDescriptorPoolAlloc, OomError>
{
let mut pools = self.pools.lock().unwrap();
// Try find an existing pool with some free space.
for pool_arc in pools.iter_mut() {
let mut pool = pool_arc.lock().unwrap();
if pool.remaining_sets_count == 0 {
continue;
}
if !(pool.remaining_capacity >= *layout.descriptors_count()) {
continue;
}
// Note that we decrease these values *before* trying to allocate from the pool.
// If allocating from the pool results in an error, we just ignore it. In order to
// avoid trying the same failing pool every time, we "pollute" it by reducing the
// available space.
pool.remaining_sets_count -= 1;
pool.remaining_capacity -= *layout.descriptors_count();
let alloc = unsafe {
use OomError;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::DescriptorPoolAllocError;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use device::Device;
use device::DeviceOwned;
/// Standard implementation of a descriptor pool.
///
/// Whenever a set is allocated, this implementation will try to find a pool that has some space
/// for it. If there is one, allocate from it. If there is none, create a new pool whose capacity
/// is 40 sets and 40 times the requested descriptors. This number is arbitrary.
pub struct StdDescriptorPool {
device: Arc<Device>,
pools: Mutex<Vec<Arc<Mutex<Pool>>>>,
}
struct Pool {
pool: UnsafeDescriptorPool,
remaining_capacity: DescriptorsCount,
remaining_sets_count: u32,
}
impl StdDescriptorPool {
/// Builds a new `StdDescriptorPool`.
pub fn new(device: Arc<Device>) -> StdDescriptorPool {
StdDescriptorPool {
device: device,
pools: Mutex::new(Vec::new()),
}
}
}
/// A descriptor set allocated from a `StdDescriptorPool`.
pub struct StdDescriptorPoolAlloc {
pool: Arc<Mutex<Pool>>,
// The set. Inside an option so that we can extract it in the destructor.
set: Option<UnsafeDescriptorSet>,
// We need to keep track of this count in order to add it back to the capacity when freeing.
descriptors: DescriptorsCount,
}
unsafe impl DescriptorPool for Arc<StdDescriptorPool> {
type Alloc = StdDescriptorPoolAlloc;
// TODO: eventually use a lock-free algorithm?
fn alloc(&self, layout: &UnsafeDescriptorSetLayout)
-> Result<StdDescriptorPoolAlloc, OomError> {
let mut pools = self.pools.lock().unwrap();
// Try find an existing pool with some free space.
for pool_arc in pools.iter_mut() {
let mut pool = pool_arc.lock().unwrap();
if pool.remaining_sets_count == 0 {
continue;
}
if !(pool.remaining_capacity >= *layout.descriptors_count()) {
continue;
}
// Note that we decrease these values *before* trying to allocate from the pool.
// If allocating from the pool results in an error, we just ignore it. In order to
// avoid trying the same failing pool every time, we "pollute" it by reducing the
// available space.
pool.remaining_sets_count -= 1;
pool.remaining_capacity -= *layout.descriptors_count();
let alloc = unsafe {
match pool.pool.alloc(Some(layout)) {
Ok(mut sets) => sets.next().unwrap(),
// An error can happen if we're out of memory, or if the pool is fragmented.
// We handle these errors by just ignoring this pool and trying the next ones.
// An error can happen if we're out of memory, or if the pool is fragmented.
// We handle these errors by just ignoring this pool and trying the next ones.
Err(_) => continue,
}
};
return Ok(StdDescriptorPoolAlloc {
pool: pool_arc.clone(),
set: Some(alloc),
descriptors: *layout.descriptors_count(),
});
}
// No existing pool can be used. Create a new one.
// We use an arbitrary number of 40 sets and 40 times the requested descriptors.
let count = layout.descriptors_count().clone() * 40;
// Failure to allocate a new pool results in an error for the whole function because
// there's no way we can recover from that.
let mut new_pool = try!(UnsafeDescriptorPool::new(self.device.clone(), &count, 40, true));
let alloc = unsafe {
}
};
return Ok(StdDescriptorPoolAlloc {
pool: pool_arc.clone(),
set: Some(alloc),
descriptors: *layout.descriptors_count(),
});
}
// No existing pool can be used. Create a new one.
// We use an arbitrary number of 40 sets and 40 times the requested descriptors.
let count = layout.descriptors_count().clone() * 40;
// Failure to allocate a new pool results in an error for the whole function because
// there's no way we can recover from that.
let mut new_pool = UnsafeDescriptorPool::new(self.device.clone(), &count, 40, true)?;
let alloc = unsafe {
match new_pool.alloc(Some(layout)) {
Ok(mut sets) => sets.next().unwrap(),
Err(DescriptorPoolAllocError::OutOfHostMemory) => {
return Err(OomError::OutOfHostMemory);
Err(DescriptorPoolAllocError::OutOfHostMemory) => {
return Err(OomError::OutOfHostMemory);
},
Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
return Err(OomError::OutOfDeviceMemory);
Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
return Err(OomError::OutOfDeviceMemory);
},
// A fragmented pool error can't happen at the first ever allocation.
// A fragmented pool error can't happen at the first ever allocation.
Err(DescriptorPoolAllocError::FragmentedPool) => unreachable!(),
// Out of pool memory cannot happen at the first ever allocation.
// Out of pool memory cannot happen at the first ever allocation.
Err(DescriptorPoolAllocError::OutOfPoolMemory) => unreachable!(),
}
};
let pool_obj = Arc::new(Mutex::new(Pool {
pool: new_pool,
remaining_capacity: count - *layout.descriptors_count(),
remaining_sets_count: 40 - 1,
}));
pools.push(pool_obj.clone());
Ok(StdDescriptorPoolAlloc {
pool: pool_obj,
set: Some(alloc),
descriptors: *layout.descriptors_count(),
})
}
}
unsafe impl DeviceOwned for StdDescriptorPool {
    // Returns the device this pool was created with.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
impl DescriptorPoolAlloc for StdDescriptorPoolAlloc {
    // The unwraps are valid: `set` is only ever taken in the Drop impl, so it is always
    // `Some` while the allocation is alive.
    #[inline]
    fn inner(&self) -> &UnsafeDescriptorSet {
        self.set.as_ref().unwrap()
    }

    #[inline]
    fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet {
        self.set.as_mut().unwrap()
    }
}
impl Drop for StdDescriptorPoolAlloc {
    // This is the destructor of a single allocation (not of the whole pool).
    fn drop(&mut self) {
        unsafe {
            let mut pool = self.pool.lock().unwrap();
            // `set` is always Some here: drop runs at most once and nothing else takes it.
            pool.pool.free(self.set.take()).unwrap();
            // Add back the capacity only after freeing, in case of a panic during the free.
            pool.remaining_sets_count += 1;
            pool.remaining_capacity += self.descriptors;
        }
    }
}
}
};
let pool_obj = Arc::new(Mutex::new(Pool {
pool: new_pool,
remaining_capacity: count -
*layout.descriptors_count(),
remaining_sets_count: 40 - 1,
}));
pools.push(pool_obj.clone());
Ok(StdDescriptorPoolAlloc {
pool: pool_obj,
set: Some(alloc),
descriptors: *layout.descriptors_count(),
})
}
}
unsafe impl DeviceOwned for StdDescriptorPool {
#[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
}
impl DescriptorPoolAlloc for StdDescriptorPoolAlloc {
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
self.set.as_ref().unwrap()
}
#[inline]
fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet {
self.set.as_mut().unwrap()
}
}
impl Drop for StdDescriptorPoolAlloc {
// This is the destructor of a single allocation (not of the whole pool).
fn drop(&mut self) {
unsafe {
let mut pool = self.pool.lock().unwrap();
pool.pool.free(self.set.take()).unwrap();
// Add back the capacity only after freeing, in case of a panic during the free.
pool.remaining_sets_count += 1;
pool.remaining_capacity += self.descriptors;
}
}
}

View File

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::cmp;
use std::error;
use std::fmt;
@ -15,7 +16,6 @@ use std::ops;
use std::ptr;
use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use smallvec::SmallVec;
use buffer::BufferAccess;
use buffer::BufferInner;
@ -27,9 +27,9 @@ use device::DeviceOwned;
use image::ImageViewAccess;
use sampler::Sampler;
use check_errors;
use OomError;
use VulkanObject;
use check_errors;
use vk;
/// A pool from which descriptor sets can be allocated.
@ -238,8 +238,8 @@ impl UnsafeDescriptorPool {
/// - Panics if `max_sets` is 0.
///
pub fn new(device: Arc<Device>, count: &DescriptorsCount, max_sets: u32,
free_descriptor_set_bit: bool) -> Result<UnsafeDescriptorPool, OomError>
{
free_descriptor_set_bit: bool)
-> Result<UnsafeDescriptorPool, OomError> {
let vk = device.pointers();
assert_ne!(max_sets, 0, "The maximum number of sets can't be 0");
@ -259,17 +259,23 @@ impl UnsafeDescriptorPool {
elem!(uniform_buffer, vk::DESCRIPTOR_TYPE_UNIFORM_BUFFER);
elem!(storage_buffer, vk::DESCRIPTOR_TYPE_STORAGE_BUFFER);
elem!(uniform_buffer_dynamic, vk::DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
elem!(storage_buffer_dynamic, vk::DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
elem!(uniform_texel_buffer, vk::DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
elem!(storage_texel_buffer, vk::DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
elem!(uniform_buffer_dynamic,
vk::DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
elem!(storage_buffer_dynamic,
vk::DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
elem!(uniform_texel_buffer,
vk::DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
elem!(storage_texel_buffer,
vk::DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
elem!(sampled_image, vk::DESCRIPTOR_TYPE_SAMPLED_IMAGE);
elem!(storage_image, vk::DESCRIPTOR_TYPE_STORAGE_IMAGE);
elem!(sampler, vk::DESCRIPTOR_TYPE_SAMPLER);
elem!(combined_image_sampler, vk::DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
elem!(combined_image_sampler,
vk::DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
elem!(input_attachment, vk::DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
assert!(!pool_sizes.is_empty(), "All the descriptors count of a pool are 0");
assert!(!pool_sizes.is_empty(),
"All the descriptors count of a pool are 0");
let pool = unsafe {
let infos = vk::DescriptorPoolCreateInfo {
@ -286,15 +292,17 @@ impl UnsafeDescriptorPool {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateDescriptorPool(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateDescriptorPool(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(UnsafeDescriptorPool {
pool: pool,
device: device.clone(),
})
pool: pool,
device: device.clone(),
})
}
/// Allocates descriptor sets from the pool, one for each layout.
@ -321,19 +329,23 @@ impl UnsafeDescriptorPool {
-> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError>
where I: IntoIterator<Item = &'l UnsafeDescriptorSetLayout>
{
let layouts: SmallVec<[_; 8]> = layouts.into_iter().map(|l| {
assert_eq!(self.device.internal_object(), l.device().internal_object(),
"Tried to allocate from a pool with a set layout of a different device");
l.internal_object()
}).collect();
let layouts: SmallVec<[_; 8]> = layouts
.into_iter()
.map(|l| {
assert_eq!(self.device.internal_object(),
l.device().internal_object(),
"Tried to allocate from a pool with a set layout of a different \
device");
l.internal_object()
})
.collect();
self.alloc_impl(&layouts)
}
// Actual implementation of `alloc`. Separated so that it is not inlined.
unsafe fn alloc_impl(&mut self, layouts: &SmallVec<[vk::DescriptorSetLayout; 8]>)
-> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError>
{
-> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError> {
let num = layouts.len();
if num == 0 {
@ -351,8 +363,8 @@ impl UnsafeDescriptorPool {
let mut output = Vec::with_capacity(num);
let vk = self.device.pointers();
let ret = vk.AllocateDescriptorSets(self.device.internal_object(), &infos,
output.as_mut_ptr());
let ret =
vk.AllocateDescriptorSets(self.device.internal_object(), &infos, output.as_mut_ptr());
// According to the specs, because `VK_ERROR_FRAGMENTED_POOL` was added after version
// 1.0 of Vulkan, any negative return value except out-of-memory errors must be
@ -370,14 +382,12 @@ impl UnsafeDescriptorPool {
c if (c as i32) < 0 => {
return Err(DescriptorPoolAllocError::FragmentedPool);
},
_ => ()
_ => (),
};
output.set_len(num);
Ok(UnsafeDescriptorPoolAllocIter {
sets: output.into_iter(),
})
Ok(UnsafeDescriptorPoolAllocIter { sets: output.into_iter() })
}
/// Frees some descriptor sets.
@ -406,11 +416,12 @@ impl UnsafeDescriptorPool {
// Actual implementation of `free`. Separated so that it is not inlined.
unsafe fn free_impl(&mut self, sets: &SmallVec<[vk::DescriptorSet; 8]>)
-> Result<(), OomError>
{
-> Result<(), OomError> {
let vk = self.device.pointers();
try!(check_errors(vk.FreeDescriptorSets(self.device.internal_object(), self.pool,
sets.len() as u32, sets.as_ptr())));
check_errors(vk.FreeDescriptorSets(self.device.internal_object(),
self.pool,
sets.len() as u32,
sets.as_ptr()))?;
Ok(())
}
@ -419,8 +430,9 @@ impl UnsafeDescriptorPool {
/// This destroys all descriptor sets and empties the pool.
pub unsafe fn reset(&mut self) -> Result<(), OomError> {
let vk = self.device.pointers();
try!(check_errors(vk.ResetDescriptorPool(self.device.internal_object(), self.pool,
0 /* reserved flags */)));
check_errors(vk.ResetDescriptorPool(self.device.internal_object(),
self.pool,
0 /* reserved flags */))?;
Ok(())
}
}
@ -479,7 +491,7 @@ impl error::Error for DescriptorPoolAllocError {
},
DescriptorPoolAllocError::OutOfPoolMemory => {
"there is no more space available in the descriptor pool"
}
},
}
}
}
@ -502,9 +514,7 @@ impl Iterator for UnsafeDescriptorPoolAllocIter {
#[inline]
fn next(&mut self) -> Option<UnsafeDescriptorSet> {
self.sets.next().map(|s| UnsafeDescriptorSet {
set: s,
})
self.sets.next().map(|s| UnsafeDescriptorSet { set: s })
}
#[inline]
@ -581,17 +591,17 @@ impl UnsafeDescriptorSet {
// The whole struct that wr write here is valid, except for pImageInfo, pBufferInfo
// and pTexelBufferView which are placeholder values.
raw_writes.push(vk::WriteDescriptorSet {
sType: vk::STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
pNext: ptr::null(),
dstSet: self.set,
dstBinding: indiv_write.binding,
dstArrayElement: indiv_write.first_array_element,
descriptorCount: indiv_write.inner.len() as u32,
descriptorType: indiv_write.ty() as u32,
pImageInfo: ptr::null(),
pBufferInfo: ptr::null(),
pTexelBufferView: ptr::null(),
});
sType: vk::STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
pNext: ptr::null(),
dstSet: self.set,
dstBinding: indiv_write.binding,
dstArrayElement: indiv_write.first_array_element,
descriptorCount: indiv_write.inner.len() as u32,
descriptorType: indiv_write.ty() as u32,
pImageInfo: ptr::null(),
pBufferInfo: ptr::null(),
pTexelBufferView: ptr::null(),
});
match indiv_write.inner[0] {
DescriptorWriteInner::Sampler(_) |
@ -624,53 +634,53 @@ impl UnsafeDescriptorSet {
DescriptorWriteInner::UniformBuffer(buffer, offset, size) |
DescriptorWriteInner::DynamicUniformBuffer(buffer, offset, size) => {
buffer_descriptors.push(vk::DescriptorBufferInfo {
buffer: buffer,
offset: offset as u64,
range: size as u64,
});
buffer: buffer,
offset: offset as u64,
range: size as u64,
});
},
DescriptorWriteInner::StorageBuffer(buffer, offset, size) |
DescriptorWriteInner::DynamicStorageBuffer(buffer, offset, size) => {
buffer_descriptors.push(vk::DescriptorBufferInfo {
buffer: buffer,
offset: offset as u64,
range: size as u64,
});
buffer: buffer,
offset: offset as u64,
range: size as u64,
});
},
DescriptorWriteInner::Sampler(sampler) => {
image_descriptors.push(vk::DescriptorImageInfo {
sampler: sampler,
imageView: 0,
imageLayout: 0,
});
sampler: sampler,
imageView: 0,
imageLayout: 0,
});
},
DescriptorWriteInner::CombinedImageSampler(sampler, view, layout) => {
image_descriptors.push(vk::DescriptorImageInfo {
sampler: sampler,
imageView: view,
imageLayout: layout,
});
sampler: sampler,
imageView: view,
imageLayout: layout,
});
},
DescriptorWriteInner::StorageImage(view, layout) => {
image_descriptors.push(vk::DescriptorImageInfo {
sampler: 0,
imageView: view,
imageLayout: layout,
});
sampler: 0,
imageView: view,
imageLayout: layout,
});
},
DescriptorWriteInner::SampledImage(view, layout) => {
image_descriptors.push(vk::DescriptorImageInfo {
sampler: 0,
imageView: view,
imageLayout: layout,
});
sampler: 0,
imageView: view,
imageLayout: layout,
});
},
DescriptorWriteInner::InputAttachment(view, layout) => {
image_descriptors.push(vk::DescriptorImageInfo {
sampler: 0,
imageView: view,
imageLayout: layout,
});
sampler: 0,
imageView: view,
imageLayout: layout,
});
},
DescriptorWriteInner::UniformTexelBuffer(view) |
DescriptorWriteInner::StorageTexelBuffer(view) => {
@ -685,25 +695,28 @@ impl UnsafeDescriptorSet {
for (i, write) in raw_writes.iter_mut().enumerate() {
write.pImageInfo = match raw_writes_img_infos[i] {
Some(off) => image_descriptors.as_ptr().offset(off as isize),
None => ptr::null()
None => ptr::null(),
};
write.pBufferInfo = match raw_writes_buf_infos[i] {
Some(off) => buffer_descriptors.as_ptr().offset(off as isize),
None => ptr::null()
None => ptr::null(),
};
write.pTexelBufferView = match raw_writes_buf_view_infos[i] {
Some(off) => buffer_views_descriptors.as_ptr().offset(off as isize),
None => ptr::null()
None => ptr::null(),
};
}
// It is forbidden to call `vkUpdateDescriptorSets` with 0 writes, so we need to perform
// this emptiness check.
if !raw_writes.is_empty() {
vk.UpdateDescriptorSets(device.internal_object(), raw_writes.len() as u32,
raw_writes.as_ptr(), 0, ptr::null());
vk.UpdateDescriptorSets(device.internal_object(),
raw_writes.len() as u32,
raw_writes.as_ptr(),
0,
ptr::null());
}
}
}
@ -761,9 +774,10 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_storage_image_layout() as u32;
DescriptorWriteInner::StorageImage(image.inner().internal_object(), layout)
}),
let layout = image.descriptor_set_storage_image_layout() as u32;
DescriptorWriteInner::StorageImage(image.inner().internal_object(),
layout)
}),
}
}
@ -772,7 +786,7 @@ impl DescriptorWrite {
DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!(DescriptorWriteInner::Sampler(sampler.internal_object()))
inner: smallvec!(DescriptorWriteInner::Sampler(sampler.internal_object())),
}
}
@ -784,30 +798,41 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_sampled_image_layout() as u32;
DescriptorWriteInner::SampledImage(image.inner().internal_object(), layout)
}),
let layout = image.descriptor_set_sampled_image_layout() as u32;
DescriptorWriteInner::SampledImage(image.inner().internal_object(),
layout)
}),
}
}
#[inline]
pub fn combined_image_sampler<I>(binding: u32, array_element: u32, sampler: &Arc<Sampler>, image: &I) -> DescriptorWrite
pub fn combined_image_sampler<I>(binding: u32, array_element: u32, sampler: &Arc<Sampler>,
image: &I)
-> DescriptorWrite
where I: ImageViewAccess
{
DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_combined_image_sampler_layout() as u32;
DescriptorWriteInner::CombinedImageSampler(sampler.internal_object(), image.inner().internal_object(), layout)
}),
let layout =
image.descriptor_set_combined_image_sampler_layout() as u32;
DescriptorWriteInner::CombinedImageSampler(sampler
.internal_object(),
image
.inner()
.internal_object(),
layout)
}),
}
}
#[inline]
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &Arc<BufferView<F, B>>) -> DescriptorWrite
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
view: &Arc<BufferView<F, B>>)
-> DescriptorWrite
where B: BufferAccess,
F: 'static + Send + Sync,
F: 'static + Send + Sync
{
assert!(view.uniform_texel_buffer());
@ -819,9 +844,11 @@ impl DescriptorWrite {
}
#[inline]
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &Arc<BufferView<F, B>>) -> DescriptorWrite
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
view: &Arc<BufferView<F, B>>)
-> DescriptorWrite
where B: BufferAccess + 'static,
F: 'static + Send + Sync,
F: 'static + Send + Sync
{
assert!(view.storage_texel_buffer());
@ -843,8 +870,10 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
DescriptorWriteInner::UniformBuffer(buffer.internal_object(), offset, size)
}),
DescriptorWriteInner::UniformBuffer(buffer.internal_object(),
offset,
size)
}),
}
}
@ -859,13 +888,16 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
DescriptorWriteInner::StorageBuffer(buffer.internal_object(), offset, size)
}),
DescriptorWriteInner::StorageBuffer(buffer.internal_object(),
offset,
size)
}),
}
}
#[inline]
pub unsafe fn dynamic_uniform_buffer<B>(binding: u32, array_element: u32, buffer: &B) -> DescriptorWrite
pub unsafe fn dynamic_uniform_buffer<B>(binding: u32, array_element: u32, buffer: &B)
-> DescriptorWrite
where B: BufferAccess
{
let size = buffer.size();
@ -875,12 +907,14 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!(DescriptorWriteInner::DynamicUniformBuffer(buffer.internal_object(),
offset, size)),
offset,
size)),
}
}
#[inline]
pub unsafe fn dynamic_storage_buffer<B>(binding: u32, array_element: u32, buffer: &B) -> DescriptorWrite
pub unsafe fn dynamic_storage_buffer<B>(binding: u32, array_element: u32, buffer: &B)
-> DescriptorWrite
where B: BufferAccess
{
let size = buffer.size();
@ -890,7 +924,8 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!(DescriptorWriteInner::DynamicStorageBuffer(buffer.internal_object(),
offset, size)),
offset,
size)),
}
}
@ -902,9 +937,12 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_input_attachment_layout() as u32;
DescriptorWriteInner::InputAttachment(image.inner().internal_object(), layout)
}),
let layout = image.descriptor_set_input_attachment_layout() as u32;
DescriptorWriteInner::InputAttachment(image
.inner()
.internal_object(),
layout)
}),
}
}
@ -913,15 +951,18 @@ impl DescriptorWrite {
pub fn ty(&self) -> DescriptorType {
match self.inner[0] {
DescriptorWriteInner::Sampler(_) => DescriptorType::Sampler,
DescriptorWriteInner::CombinedImageSampler(_, _, _) => DescriptorType::CombinedImageSampler,
DescriptorWriteInner::CombinedImageSampler(_, _, _) =>
DescriptorType::CombinedImageSampler,
DescriptorWriteInner::SampledImage(_, _) => DescriptorType::SampledImage,
DescriptorWriteInner::StorageImage(_, _) => DescriptorType::StorageImage,
DescriptorWriteInner::UniformTexelBuffer(_) => DescriptorType::UniformTexelBuffer,
DescriptorWriteInner::StorageTexelBuffer(_) => DescriptorType::StorageTexelBuffer,
DescriptorWriteInner::UniformBuffer(_, _, _) => DescriptorType::UniformBuffer,
DescriptorWriteInner::StorageBuffer(_, _, _) => DescriptorType::StorageBuffer,
DescriptorWriteInner::DynamicUniformBuffer(_, _, _) => DescriptorType::UniformBufferDynamic,
DescriptorWriteInner::DynamicStorageBuffer(_, _, _) => DescriptorType::StorageBufferDynamic,
DescriptorWriteInner::DynamicUniformBuffer(_, _, _) =>
DescriptorType::UniformBufferDynamic,
DescriptorWriteInner::DynamicStorageBuffer(_, _, _) =>
DescriptorType::StorageBufferDynamic,
DescriptorWriteInner::InputAttachment(_, _) => DescriptorType::InputAttachment,
}
}
@ -929,22 +970,22 @@ impl DescriptorWrite {
#[cfg(test)]
mod tests {
use std::iter;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use std::iter;
#[test]
fn pool_create() {
let (device, _) = gfx_dev_and_queue!();
let desc = DescriptorsCount {
uniform_buffer: 1,
.. DescriptorsCount::zero()
..DescriptorsCount::zero()
};
let _ = UnsafeDescriptorPool::new(device, &desc, 10, false).unwrap();
@ -956,7 +997,7 @@ mod tests {
let (device, _) = gfx_dev_and_queue!();
let desc = DescriptorsCount {
uniform_buffer: 1,
.. DescriptorsCount::zero()
..DescriptorsCount::zero()
};
let _ = UnsafeDescriptorPool::new(device, &desc, 0, false);
@ -975,20 +1016,21 @@ mod tests {
let layout = DescriptorDesc {
ty: DescriptorDescTy::Buffer(DescriptorBufferDesc {
dynamic: Some(false),
storage: false,
content: DescriptorBufferContentDesc::F32,
}),
dynamic: Some(false),
storage: false,
content: DescriptorBufferContentDesc::F32,
}),
array_count: 1,
stages: ShaderStages::all_graphics(),
readonly: true,
};
let set_layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(layout))).unwrap();
let set_layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(layout)))
.unwrap();
let desc = DescriptorsCount {
uniform_buffer: 10,
.. DescriptorsCount::zero()
..DescriptorsCount::zero()
};
let mut pool = UnsafeDescriptorPool::new(device, &desc, 10, false).unwrap();
@ -1006,10 +1048,10 @@ mod tests {
let layout = DescriptorDesc {
ty: DescriptorDescTy::Buffer(DescriptorBufferDesc {
dynamic: Some(false),
storage: false,
content: DescriptorBufferContentDesc::F32,
}),
dynamic: Some(false),
storage: false,
content: DescriptorBufferContentDesc::F32,
}),
array_count: 1,
stages: ShaderStages::all_graphics(),
readonly: true,
@ -1019,7 +1061,7 @@ mod tests {
let desc = DescriptorsCount {
uniform_buffer: 10,
.. DescriptorsCount::zero()
..DescriptorsCount::zero()
};
let mut pool = UnsafeDescriptorPool::new(device2, &desc, 10, false).unwrap();
@ -1034,7 +1076,7 @@ mod tests {
let desc = DescriptorsCount {
uniform_buffer: 1,
.. DescriptorsCount::zero()
..DescriptorsCount::zero()
};
let mut pool = UnsafeDescriptorPool::new(device, &desc, 1, false).unwrap();

View File

@ -7,15 +7,15 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;
use check_errors;
use OomError;
use VulkanObject;
use check_errors;
use vk;
use descriptor::descriptor::DescriptorDesc;
@ -49,26 +49,30 @@ impl UnsafeDescriptorSetLayout {
{
let mut descriptors_count = DescriptorsCount::zero();
let bindings = descriptors.into_iter().enumerate().filter_map(|(binding, desc)| {
let desc = match desc {
Some(d) => d,
None => return None
};
let bindings = descriptors
.into_iter()
.enumerate()
.filter_map(|(binding, desc)| {
let desc = match desc {
Some(d) => d,
None => return None,
};
// FIXME: it is not legal to pass eg. the TESSELLATION_SHADER bit when the device
// doesn't have tess shaders enabled
// FIXME: it is not legal to pass eg. the TESSELLATION_SHADER bit when the device
// doesn't have tess shaders enabled
let ty = desc.ty.ty().unwrap(); // TODO: shouldn't panic
descriptors_count.add_one(ty);
let ty = desc.ty.ty().unwrap(); // TODO: shouldn't panic
descriptors_count.add_one(ty);
Some(vk::DescriptorSetLayoutBinding {
binding: binding as u32,
descriptorType: ty as u32,
descriptorCount: desc.array_count,
stageFlags: desc.stages.into(),
pImmutableSamplers: ptr::null(), // FIXME: not yet implemented
Some(vk::DescriptorSetLayoutBinding {
binding: binding as u32,
descriptorType: ty as u32,
descriptorCount: desc.array_count,
stageFlags: desc.stages.into(),
pImmutableSamplers: ptr::null(), // FIXME: not yet implemented
})
})
}).collect::<SmallVec<[_; 32]>>();
.collect::<SmallVec<[_; 32]>>();
// Note that it seems legal to have no descriptor at all in the set.
@ -76,23 +80,25 @@ impl UnsafeDescriptorSetLayout {
let infos = vk::DescriptorSetLayoutCreateInfo {
sType: vk::STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
bindingCount: bindings.len() as u32,
pBindings: bindings.as_ptr(),
};
let mut output = mem::uninitialized();
let vk = device.pointers();
try!(check_errors(vk.CreateDescriptorSetLayout(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateDescriptorSetLayout(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(UnsafeDescriptorSetLayout {
layout: layout,
device: device,
descriptors_count: descriptors_count,
})
layout: layout,
device: device,
descriptors_count: descriptors_count,
})
}
/// Returns the number of descriptors of each type.
@ -132,22 +138,21 @@ impl Drop for UnsafeDescriptorSetLayout {
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyDescriptorSetLayout(self.device.internal_object(), self.layout,
ptr::null());
vk.DestroyDescriptorSetLayout(self.device.internal_object(), self.layout, ptr::null());
}
}
}
#[cfg(test)]
mod tests {
use std::iter;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use std::iter;
#[test]
fn empty() {
@ -161,10 +166,10 @@ mod tests {
let layout = DescriptorDesc {
ty: DescriptorDescTy::Buffer(DescriptorBufferDesc {
dynamic: Some(false),
storage: false,
content: DescriptorBufferContentDesc::F32,
}),
dynamic: Some(false),
storage: false,
content: DescriptorBufferContentDesc::F32,
}),
array_count: 1,
stages: ShaderStages::all_graphics(),
readonly: true,
@ -172,9 +177,10 @@ mod tests {
let sl = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(layout))).unwrap();
assert_eq!(sl.descriptors_count(), &DescriptorsCount {
uniform_buffer: 1,
.. DescriptorsCount::zero()
});
assert_eq!(sl.descriptors_count(),
&DescriptorsCount {
uniform_buffer: 1,
..DescriptorsCount::zero()
});
}
}

View File

@ -15,15 +15,15 @@ use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
/// Description of an empty pipeline layout.
///
/// # Example
///
///
/// ```
/// # use std::sync::Arc;
/// # use vulkano::device::Device;
/// use vulkano::descriptor::pipeline_layout::EmptyPipelineDesc;
/// use vulkano::descriptor::pipeline_layout::PipelineLayoutDesc;
///
///
/// # let device: Arc<Device> = return;
/// let pipeline_layout = EmptyPipelineDesc.build(device.clone()).unwrap();
/// let pipeline_layout = EmptyPipelineDesc.build(device.clone()).unwrap();
/// ```
#[derive(Debug, Copy, Clone)]
pub struct EmptyPipelineDesc;

View File

@ -42,7 +42,7 @@
//! graphics pipeline as parameter.
//!
//! # Custom pipeline layouts
//!
//!
//! In some situations, it is better (as in, faster) to share the same descriptor set or sets
//! between multiple pipelines that each use different descriptors. To do so, you have to create a
//! pipeline layout object in advance and pass it when you create the pipelines.
@ -57,10 +57,10 @@ pub use self::traits::PipelineLayoutAbstract;
pub use self::traits::PipelineLayoutDesc;
pub use self::traits::PipelineLayoutDescNames;
pub use self::traits::PipelineLayoutDescPcRange;
pub use self::traits::PipelineLayoutSuperset;
pub use self::traits::PipelineLayoutNotSupersetError;
pub use self::traits::PipelineLayoutSetsCompatible;
pub use self::traits::PipelineLayoutPushConstantsCompatible;
pub use self::traits::PipelineLayoutSetsCompatible;
pub use self::traits::PipelineLayoutSuperset;
pub use self::union::PipelineLayoutDescUnion;
mod empty;

View File

@ -7,31 +7,31 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;
use check_errors;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use vk;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
/// Wrapper around the `PipelineLayout` Vulkan object. Describes to the Vulkan implementation the
/// descriptor sets and push constants available to your shaders
/// descriptor sets and push constants available to your shaders
pub struct PipelineLayout<L> {
device: Arc<Device>,
layout: vk::PipelineLayout,
@ -39,7 +39,9 @@ pub struct PipelineLayout<L> {
desc: L,
}
impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
impl<L> PipelineLayout<L>
where L: PipelineLayoutDesc
{
/// Creates a new `PipelineLayout`.
///
/// # Panic
@ -48,8 +50,7 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
/// device than the one passed as parameter.
#[inline]
pub fn new(device: Arc<Device>, desc: L)
-> Result<PipelineLayout<L>, PipelineLayoutCreationError>
{
-> Result<PipelineLayout<L>, PipelineLayoutCreationError> {
let vk = device.pointers();
let limits = device.physical_device().limits();
@ -58,24 +59,28 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
let mut layouts: SmallVec<[_; 16]> = SmallVec::new();
for num in 0 .. desc.num_sets() {
layouts.push(match desc.provided_set_layout(num) {
Some(l) => {
assert_eq!(l.device().internal_object(), device.internal_object());
l
},
None => {
let sets_iter = 0 .. desc.num_bindings_in_set(num).unwrap_or(0);
let desc_iter = sets_iter.map(|d| desc.descriptor(num, d));
Arc::new(try!(UnsafeDescriptorSetLayout::new(device.clone(), desc_iter)))
},
});
Some(l) => {
assert_eq!(l.device().internal_object(),
device.internal_object());
l
},
None => {
let sets_iter = 0 ..
desc.num_bindings_in_set(num).unwrap_or(0);
let desc_iter = sets_iter.map(|d| desc.descriptor(num, d));
Arc::new(UnsafeDescriptorSetLayout::new(device.clone(),
desc_iter)?)
},
});
}
layouts
};
// Grab the list of `vkDescriptorSetLayout` objects from `layouts`.
let layouts_ids = layouts.iter().map(|l| {
l.internal_object()
}).collect::<SmallVec<[_; 16]>>();
let layouts_ids = layouts
.iter()
.map(|l| l.internal_object())
.collect::<SmallVec<[_; 16]>>();
// FIXME: must also check per-descriptor-type limits (eg. max uniform buffer descriptors)
@ -88,7 +93,11 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
let mut out: SmallVec<[_; 8]> = SmallVec::new();
for pc_id in 0 .. desc.num_push_constants_ranges() {
let PipelineLayoutDescPcRange { offset, size, stages } = {
let PipelineLayoutDescPcRange {
offset,
size,
stages,
} = {
match desc.push_constants_range(pc_id) {
Some(o) => o,
None => continue,
@ -104,10 +113,10 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
}
out.push(vk::PushConstantRange {
stageFlags: stages.into(),
offset: offset as u32,
size: size as u32,
});
stageFlags: stages.into(),
offset: offset as u32,
size: size as u32,
});
}
out
@ -117,17 +126,17 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
// We check that with a debug_assert because it's supposed to be enforced by the
// `PipelineLayoutDesc`.
debug_assert!({
let mut stages = 0;
let mut outcome = true;
for pc in push_constants.iter() {
if (stages & pc.stageFlags) != 0 {
outcome = false;
break;
}
stages &= pc.stageFlags;
}
outcome
});
let mut stages = 0;
let mut outcome = true;
for pc in push_constants.iter() {
if (stages & pc.stageFlags) != 0 {
outcome = false;
break;
}
stages &= pc.stageFlags;
}
outcome
});
// FIXME: it is not legal to pass eg. the TESSELLATION_SHADER bit when the device doesn't
// have tess shaders enabled
@ -137,7 +146,7 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
let infos = vk::PipelineLayoutCreateInfo {
sType: vk::STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
setLayoutCount: layouts_ids.len() as u32,
pSetLayouts: layouts_ids.as_ptr(),
pushConstantRangeCount: push_constants.len() as u32,
@ -145,21 +154,25 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreatePipelineLayout(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreatePipelineLayout(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(PipelineLayout {
device: device.clone(),
layout: layout,
layouts: layouts,
desc: desc,
})
device: device.clone(),
layout: layout,
layouts: layouts,
desc: desc,
})
}
}
impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
impl<L> PipelineLayout<L>
where L: PipelineLayoutDesc
{
/// Returns the description of the pipeline layout.
#[inline]
pub fn desc(&self) -> &L {
@ -167,7 +180,9 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
}
}
unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D> where D: PipelineLayoutDescNames {
unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D>
where D: PipelineLayoutDescNames
{
#[inline]
fn sys(&self) -> PipelineLayoutSys {
PipelineLayoutSys(&self.layout)
@ -179,7 +194,9 @@ unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D> where D: PipelineLay
}
}
unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D> where D: PipelineLayoutDesc {
unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D>
where D: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
self.desc.num_sets()
@ -206,7 +223,9 @@ unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D> where D: PipelineLayoutD
}
}
unsafe impl<D> PipelineLayoutDescNames for PipelineLayout<D> where D: PipelineLayoutDescNames {
unsafe impl<D> PipelineLayoutDescNames for PipelineLayout<D>
where D: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
self.desc.descriptor_by_name(name)
@ -220,7 +239,9 @@ unsafe impl<D> DeviceOwned for PipelineLayout<D> {
}
}
impl<D> fmt::Debug for PipelineLayout<D> where D: fmt::Debug {
impl<D> fmt::Debug for PipelineLayout<D>
where D: fmt::Debug
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.debug_struct("PipelineLayout")
.field("raw", &self.layout)
@ -293,7 +314,7 @@ impl error::Error for PipelineLayoutCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
PipelineLayoutCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -322,7 +343,7 @@ impl From<Error> for PipelineLayoutCreationError {
err @ Error::OutOfDeviceMemory => {
PipelineLayoutCreationError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

View File

@ -7,22 +7,22 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::cmp;
use std::error;
use std::fmt;
use std::cmp;
use std::sync::Arc;
use SafeDeref;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorSetsCollection;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
use descriptor::pipeline_layout::PipelineLayoutSys;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use device::Device;
use device::DeviceOwned;
use SafeDeref;
/// Trait for objects that describe the layout of the descriptors and push constants of a pipeline.
// TODO: meh for PipelineLayoutDescNames ; the `Names` thing shouldn't be mandatory
@ -40,7 +40,10 @@ pub unsafe trait PipelineLayoutAbstract: PipelineLayoutDescNames + DeviceOwned {
fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>>;
}
unsafe impl<T> PipelineLayoutAbstract for T where T: SafeDeref, T::Target: PipelineLayoutAbstract {
unsafe impl<T> PipelineLayoutAbstract for T
where T: SafeDeref,
T::Target: PipelineLayoutAbstract
{
#[inline]
fn sys(&self) -> PipelineLayoutSys {
(**self).sys()
@ -91,7 +94,9 @@ pub unsafe trait PipelineLayoutDesc {
/// Builds the union of this layout and another.
#[inline]
fn union<T>(self, other: T) -> PipelineLayoutDescUnion<Self, T> where Self: Sized {
fn union<T>(self, other: T) -> PipelineLayoutDescUnion<Self, T>
where Self: Sized
{
PipelineLayoutDescUnion::new(self, other)
}
@ -99,8 +104,7 @@ pub unsafe trait PipelineLayoutDesc {
///
/// > **Note**: This is just a shortcut for `PipelineLayout::new`.
#[inline]
fn build(self, device: Arc<Device>)
-> Result<PipelineLayout<Self>, PipelineLayoutCreationError>
fn build(self, device: Arc<Device>) -> Result<PipelineLayout<Self>, PipelineLayoutCreationError>
where Self: Sized
{
PipelineLayout::new(device, self)
@ -120,7 +124,10 @@ pub struct PipelineLayoutDescPcRange {
pub stages: ShaderStages,
}
unsafe impl<T> PipelineLayoutDesc for T where T: SafeDeref, T::Target: PipelineLayoutDesc {
unsafe impl<T> PipelineLayoutDesc for T
where T: SafeDeref,
T::Target: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
(**self).num_sets()
@ -155,7 +162,10 @@ pub unsafe trait PipelineLayoutDescNames: PipelineLayoutDesc {
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)>;
}
unsafe impl<T> PipelineLayoutDescNames for T where T: SafeDeref, T::Target: PipelineLayoutDescNames {
unsafe impl<T> PipelineLayoutDescNames for T
where T: SafeDeref,
T::Target: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
(**self).descriptor_by_name(name)
@ -174,7 +184,8 @@ pub unsafe trait PipelineLayoutSuperset<Other: ?Sized>: PipelineLayoutDesc
}
unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSuperset<U> for T
where T: PipelineLayoutDesc, U: PipelineLayoutDesc
where T: PipelineLayoutDesc,
U: PipelineLayoutDesc
{
fn ensure_superset_of(&self, other: &U) -> Result<(), PipelineLayoutNotSupersetError> {
for set_num in 0 .. cmp::max(self.num_sets(), other.num_sets()) {
@ -183,10 +194,10 @@ unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSuperset<U> for T
if self_num_bindings < other_num_bindings {
return Err(PipelineLayoutNotSupersetError::DescriptorsCountMismatch {
set_num: set_num as u32,
self_num_descriptors: self_num_bindings as u32,
other_num_descriptors: other_num_bindings as u32,
});
set_num: set_num as u32,
self_num_descriptors: self_num_bindings as u32,
other_num_descriptors: other_num_bindings as u32,
});
}
for desc_num in 0 .. other_num_bindings {
@ -200,11 +211,12 @@ unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSuperset<U> for T
});
}
},
(None, Some(_)) => return Err(PipelineLayoutNotSupersetError::ExpectedEmptyDescriptor {
set_num: set_num as u32,
descriptor: desc_num as u32,
}),
_ => ()
(None, Some(_)) =>
return Err(PipelineLayoutNotSupersetError::ExpectedEmptyDescriptor {
set_num: set_num as u32,
descriptor: desc_num as u32,
}),
_ => (),
}
}
}
@ -222,14 +234,11 @@ pub enum PipelineLayoutNotSupersetError {
DescriptorsCountMismatch {
set_num: u32,
self_num_descriptors: u32,
other_num_descriptors: u32
other_num_descriptors: u32,
},
/// Expected an empty descriptor, but got something instead.
ExpectedEmptyDescriptor {
set_num: u32,
descriptor: u32,
},
ExpectedEmptyDescriptor { set_num: u32, descriptor: u32 },
/// Two descriptors are incompatible.
IncompatibleDescriptors {
@ -263,7 +272,7 @@ impl fmt::Display for PipelineLayoutNotSupersetError {
}
}
/// Traits that allow determining whether
/// Traits that allow determining whether
pub unsafe trait PipelineLayoutSetsCompatible<Other: ?Sized>: PipelineLayoutDesc
where Other: DescriptorSetsCollection
{
@ -272,7 +281,8 @@ pub unsafe trait PipelineLayoutSetsCompatible<Other: ?Sized>: PipelineLayoutDesc
}
unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSetsCompatible<U> for T
where T: PipelineLayoutDesc, U: DescriptorSetsCollection
where T: PipelineLayoutDesc,
U: DescriptorSetsCollection
{
fn is_compatible(&self, sets: &U) -> bool {
/*let mut other_descriptor_sets = DescriptorSetsCollection::description(sets);
@ -295,14 +305,15 @@ unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSetsCompatible<U> for T
}
}*/
// FIXME:
// FIXME:
true
}
}
/// Traits that allow determining whether
/// Traits that allow determining whether
// TODO: require a trait on Pc
pub unsafe trait PipelineLayoutPushConstantsCompatible<Pc: ?Sized>: PipelineLayoutDesc {
pub unsafe trait PipelineLayoutPushConstantsCompatible<Pc: ?Sized>
: PipelineLayoutDesc {
/// Returns true if `Pc` can be used with a pipeline that uses `self` as layout.
fn is_compatible(&self, &Pc) -> bool;
}

View File

@ -1,149 +1,157 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::cmp;
use std::sync::Arc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
/// Contains the union of two pipeline layout description.
///
/// If `A` and `B` both implement `PipelineLayoutDesc`, then this struct also implements
/// `PipelineLayoutDesc` and will correspond to the union of the `A` object and the `B` object.
pub struct PipelineLayoutDescUnion<A, B> {
a: A,
b: B,
}
impl<A, B> PipelineLayoutDescUnion<A, B> {
// FIXME: check collisions
pub fn new(a: A, b: B) -> PipelineLayoutDescUnion<A, B> {
PipelineLayoutDescUnion { a: a, b: b }
}
}
unsafe impl<A, B> PipelineLayoutDesc for PipelineLayoutDescUnion<A, B>
where A: PipelineLayoutDesc, B: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
cmp::max(self.a.num_sets(), self.b.num_sets())
}
#[inline]
fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
let a = self.a.num_bindings_in_set(set);
let b = self.b.num_bindings_in_set(set);
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use std::cmp;
use std::sync::Arc;
/// Contains the union of two pipeline layout description.
///
/// If `A` and `B` both implement `PipelineLayoutDesc`, then this struct also implements
/// `PipelineLayoutDesc` and will correspond to the union of the `A` object and the `B` object.
pub struct PipelineLayoutDescUnion<A, B> {
a: A,
b: B,
}
impl<A, B> PipelineLayoutDescUnion<A, B> {
// FIXME: check collisions
pub fn new(a: A, b: B) -> PipelineLayoutDescUnion<A, B> {
PipelineLayoutDescUnion { a: a, b: b }
}
}
unsafe impl<A, B> PipelineLayoutDesc for PipelineLayoutDescUnion<A, B>
where A: PipelineLayoutDesc,
B: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
cmp::max(self.a.num_sets(), self.b.num_sets())
}
#[inline]
fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
let a = self.a.num_bindings_in_set(set);
let b = self.b.num_bindings_in_set(set);
match (a, b) {
(Some(a), Some(b)) => Some(cmp::max(a, b)),
(Some(a), None) => Some(a),
(None, Some(b)) => Some(b),
(None, None) => None,
}
}
#[inline]
fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
let a = self.a.descriptor(set, binding);
let b = self.b.descriptor(set, binding);
}
}
#[inline]
fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
let a = self.a.descriptor(set, binding);
let b = self.b.descriptor(set, binding);
match (a, b) {
(Some(a), Some(b)) => Some(a.union(&b).expect("Can't be union-ed")),
(Some(a), None) => Some(a),
(None, Some(b)) => Some(b),
(None, None) => None,
}
}
#[inline]
fn provided_set_layout(&self, set: usize) -> Option<Arc<UnsafeDescriptorSetLayout>> {
self.a.provided_set_layout(set).or(self.b.provided_set_layout(set))
}
#[inline]
fn num_push_constants_ranges(&self) -> usize {
// We simply call `push_constants_range` repeatidely to determine when it is over.
// TODO: consider caching this
(self.a.num_push_constants_ranges() ..).filter(|&n| {
self.push_constants_range(n).is_none()
}).next().unwrap()
}
// TODO: needs tests
#[inline]
fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
// The strategy here is that we return the same ranges as `self.a`, except that if there
// happens to be a range with a similar stage in `self.b` then we adjust the offset and
// size of the range coming from `self.a` to include the range of `self.b`.
//
// After all the ranges of `self.a` have been returned, we return the ones from `self.b`
// that don't intersect with any range of `self.a`.
if let Some(mut pc) = self.a.push_constants_range(num) {
// We try to find the ranges in `self.b` that share the same stages as us.
for n in 0 .. self.b.num_push_constants_ranges() {
let other_pc = self.b.push_constants_range(n).unwrap();
if other_pc.stages.intersects(&pc.stages) {
if other_pc.offset < pc.offset {
pc.size += pc.offset - other_pc.offset;
pc.size = cmp::max(pc.size, other_pc.size);
pc.offset = other_pc.offset;
} else if other_pc.offset > pc.offset {
pc.size = cmp::max(pc.size, other_pc.size + (other_pc.offset - pc.offset));
}
}
}
return Some(pc);
}
let mut num = num - self.a.num_push_constants_ranges();
'outer_loop: for b_r in 0 .. self.b.num_push_constants_ranges() {
let pc = self.b.push_constants_range(b_r).unwrap();
for n in 0 .. self.a.num_push_constants_ranges() {
let other_pc = self.a.push_constants_range(n).unwrap();
if other_pc.stages.intersects(&pc.stages) {
continue 'outer_loop;
}
}
if num == 0 {
return Some(pc);
} else {
num -= 1;
}
}
None
}
}
unsafe impl<A, B> PipelineLayoutDescNames for PipelineLayoutDescUnion<A, B>
where A: PipelineLayoutDescNames, B: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
let a = self.a.descriptor_by_name(name);
let b = self.b.descriptor_by_name(name);
}
}
#[inline]
fn provided_set_layout(&self, set: usize) -> Option<Arc<UnsafeDescriptorSetLayout>> {
self.a
.provided_set_layout(set)
.or(self.b.provided_set_layout(set))
}
#[inline]
fn num_push_constants_ranges(&self) -> usize {
// We simply call `push_constants_range` repeatidely to determine when it is over.
// TODO: consider caching this
(self.a.num_push_constants_ranges() ..)
.filter(|&n| self.push_constants_range(n).is_none())
.next()
.unwrap()
}
// TODO: needs tests
#[inline]
fn push_constants_range(&self, num: usize) -> Option<PipelineLayoutDescPcRange> {
// The strategy here is that we return the same ranges as `self.a`, except that if there
// happens to be a range with a similar stage in `self.b` then we adjust the offset and
// size of the range coming from `self.a` to include the range of `self.b`.
//
// After all the ranges of `self.a` have been returned, we return the ones from `self.b`
// that don't intersect with any range of `self.a`.
if let Some(mut pc) = self.a.push_constants_range(num) {
// We try to find the ranges in `self.b` that share the same stages as us.
for n in 0 .. self.b.num_push_constants_ranges() {
let other_pc = self.b.push_constants_range(n).unwrap();
if other_pc.stages.intersects(&pc.stages) {
if other_pc.offset < pc.offset {
pc.size += pc.offset - other_pc.offset;
pc.size = cmp::max(pc.size, other_pc.size);
pc.offset = other_pc.offset;
} else if other_pc.offset > pc.offset {
pc.size = cmp::max(pc.size, other_pc.size + (other_pc.offset - pc.offset));
}
}
}
return Some(pc);
}
let mut num = num - self.a.num_push_constants_ranges();
'outer_loop: for b_r in 0 .. self.b.num_push_constants_ranges() {
let pc = self.b.push_constants_range(b_r).unwrap();
for n in 0 .. self.a.num_push_constants_ranges() {
let other_pc = self.a.push_constants_range(n).unwrap();
if other_pc.stages.intersects(&pc.stages) {
continue 'outer_loop;
}
}
if num == 0 {
return Some(pc);
} else {
num -= 1;
}
}
None
}
}
unsafe impl<A, B> PipelineLayoutDescNames for PipelineLayoutDescUnion<A, B>
where A: PipelineLayoutDescNames,
B: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
let a = self.a.descriptor_by_name(name);
let b = self.b.descriptor_by_name(name);
match (a, b) {
(None, None) => None,
(Some(r), None) => Some(r),
(None, Some(r)) => Some(r),
(Some(a), Some(b)) => { assert_eq!(a, b); Some(a) }
}
}
}
(Some(a), Some(b)) => {
assert_eq!(a, b);
Some(a)
},
}
}
}

View File

@ -89,10 +89,12 @@
//!
//! TODO: write
use fnv::FnvHasher;
use smallvec::SmallVec;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fmt;
use std::error;
use std::fmt;
use std::hash::BuildHasherDefault;
use std::mem;
use std::ops::Deref;
@ -101,8 +103,6 @@ use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;
use std::sync::Weak;
use smallvec::SmallVec;
use fnv::FnvHasher;
use command_buffer::pool::StandardCommandPool;
use descriptor::descriptor_set::StdDescriptorPool;
@ -129,15 +129,18 @@ pub struct Device {
vk: vk::DevicePointers,
standard_pool: Mutex<Weak<StdMemoryPool>>,
standard_descriptor_pool: Mutex<Weak<StdDescriptorPool>>,
standard_command_pools: Mutex<HashMap<u32, Weak<StandardCommandPool>, BuildHasherDefault<FnvHasher>>>,
standard_command_pools:
Mutex<HashMap<u32, Weak<StandardCommandPool>, BuildHasherDefault<FnvHasher>>>,
features: Features,
extensions: DeviceExtensions,
}
// The `StandardCommandPool` type doesn't implement Send/Sync, so we have to manually reimplement
// them for the device itself.
unsafe impl Send for Device {}
unsafe impl Sync for Device {}
unsafe impl Send for Device {
}
unsafe impl Sync for Device {
}
impl Device {
/// Builds a new Vulkan device for the given physical device.
@ -161,10 +164,10 @@ impl Device {
// TODO: return Arc<Queue> and handle synchronization in the Queue
// TODO: should take the PhysicalDevice by value
pub fn new<'a, I, Ext>(phys: &'a PhysicalDevice, requested_features: &Features,
extensions: Ext, queue_families: I)
extensions: Ext, queue_families: I)
-> Result<(Arc<Device>, QueuesIter), DeviceCreationError>
where I: IntoIterator<Item = (QueueFamily<'a>, f32)>,
Ext: Into<RawDeviceExtensions>,
Ext: Into<RawDeviceExtensions>
{
let queue_families = queue_families.into_iter();
@ -186,14 +189,16 @@ impl Device {
// Because there's no way to query the list of layers enabled for an instance, we need
// to save it alongside the instance. (`vkEnumerateDeviceLayerProperties` should get
// the right list post-1.0.13, but not pre-1.0.13, so we can't use it here.)
let layers_ptr = phys.instance().loaded_layers().map(|layer| {
layer.as_ptr()
}).collect::<SmallVec<[_; 16]>>();
let layers_ptr = phys.instance()
.loaded_layers()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 16]>>();
let extensions = extensions.into();
let extensions_list = extensions.iter().map(|extension| {
extension.as_ptr()
}).collect::<SmallVec<[_; 16]>>();
let extensions_list = extensions
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 16]>>();
// device creation
let device = unsafe {
@ -223,16 +228,19 @@ impl Device {
}
// turning `queues` into an array of `vkDeviceQueueCreateInfo` suitable for Vulkan
let queues = queues.iter().map(|&(queue_id, ref priorities)| {
vk::DeviceQueueCreateInfo {
sType: vk::STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
queueFamilyIndex: queue_id,
queueCount: priorities.len() as u32,
pQueuePriorities: priorities.as_ptr()
}
}).collect::<SmallVec<[_; 16]>>();
let queues = queues
.iter()
.map(|&(queue_id, ref priorities)| {
vk::DeviceQueueCreateInfo {
sType: vk::STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
queueFamilyIndex: queue_id,
queueCount: priorities.len() as u32,
pQueuePriorities: priorities.as_ptr(),
}
})
.collect::<SmallVec<[_; 16]>>();
// TODO: The plan regarding `robustBufferAccess` is to check the shaders' code to see
// if they can possibly perform out-of-bounds reads and writes. If the user tries
@ -256,7 +264,7 @@ impl Device {
let infos = vk::DeviceCreateInfo {
sType: vk::STRUCTURE_TYPE_DEVICE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
queueCreateInfoCount: queues.len() as u32,
pQueueCreateInfos: queues.as_ptr(),
enabledLayerCount: layers_ptr.len() as u32,
@ -267,27 +275,30 @@ impl Device {
};
let mut output = mem::uninitialized();
try!(check_errors(vk_i.CreateDevice(phys.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk_i.CreateDevice(phys.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
// loading the function pointers of the newly-created device
let vk = vk::DevicePointers::load(|name| {
unsafe { vk_i.GetDeviceProcAddr(device, name.as_ptr()) as *const _ }
});
let vk = vk::DevicePointers::load(|name| unsafe {
vk_i.GetDeviceProcAddr(device, name.as_ptr()) as
*const _
});
let device = Arc::new(Device {
instance: phys.instance().clone(),
physical_device: phys.index(),
device: device,
vk: vk,
standard_pool: Mutex::new(Weak::new()),
standard_descriptor_pool: Mutex::new(Weak::new()),
standard_command_pools: Mutex::new(Default::default()),
features: requested_features.clone(),
extensions: (&extensions).into(),
});
instance: phys.instance().clone(),
physical_device: phys.index(),
device: device,
vk: vk,
standard_pool: Mutex::new(Weak::new()),
standard_descriptor_pool: Mutex::new(Weak::new()),
standard_command_pools: Mutex::new(Default::default()),
features: requested_features.clone(),
extensions: (&extensions).into(),
});
// Iterator for the produced queues.
let output_queues = QueuesIter {
@ -317,7 +328,7 @@ impl Device {
/// while this function is waiting.
///
pub unsafe fn wait(&self) -> Result<(), OomError> {
try!(check_errors(self.vk.DeviceWaitIdle(self.device)));
check_errors(self.vk.DeviceWaitIdle(self.device))?;
Ok(())
}
@ -397,7 +408,7 @@ impl Device {
let new_pool = Arc::new(StandardCommandPool::new(me.clone(), queue));
entry.insert(Arc::downgrade(&new_pool));
new_pool
}
},
}
}
}
@ -439,7 +450,10 @@ pub unsafe trait DeviceOwned {
fn device(&self) -> &Arc<Device>;
}
unsafe impl<T> DeviceOwned for T where T: Deref, T::Target: DeviceOwned {
unsafe impl<T> DeviceOwned for T
where T: Deref,
T::Target: DeviceOwned
{
#[inline]
fn device(&self) -> &Arc<Device> {
(**self).device()
@ -460,20 +474,22 @@ impl Iterator for QueuesIter {
unsafe {
let &(family, id) = match self.families_and_ids.get(self.next_queue) {
Some(a) => a,
None => return None
None => return None,
};
self.next_queue += 1;
let mut output = mem::uninitialized();
self.device.vk.GetDeviceQueue(self.device.device, family, id, &mut output);
self.device
.vk
.GetDeviceQueue(self.device.device, family, id, &mut output);
Some(Arc::new(Queue {
queue: Mutex::new(output),
device: self.device.clone(),
family: family,
id: id,
}))
queue: Mutex::new(output),
device: self.device.clone(),
family: family,
id: id,
}))
}
}
@ -484,7 +500,8 @@ impl Iterator for QueuesIter {
}
}
impl ExactSizeIterator for QueuesIter {}
impl ExactSizeIterator for QueuesIter {
}
/// Error that can be returned when creating a device.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@ -562,7 +579,7 @@ impl From<Error> for DeviceCreationError {
Error::ExtensionNotPresent => DeviceCreationError::ExtensionNotPresent,
Error::FeatureNotPresent => DeviceCreationError::FeatureNotPresent,
Error::TooManyObjects => DeviceCreationError::TooManyObjects,
_ => panic!("Unexpected error value: {}", err as i32)
_ => panic!("Unexpected error value: {}", err as i32),
}
}
}
@ -574,7 +591,7 @@ pub struct Queue {
queue: Mutex<vk::Queue>,
device: Arc<Device>,
family: u32,
id: u32, // id within family
id: u32, // id within family
}
impl Queue {
@ -587,15 +604,17 @@ impl Queue {
/// Returns true if this is the same queue as another one.
#[inline]
pub fn is_same(&self, other: &Queue) -> bool {
self.id == other.id &&
self.family == other.family &&
self.id == other.id && self.family == other.family &&
self.device.internal_object() == other.device.internal_object()
}
/// Returns the family this queue belongs to.
#[inline]
pub fn family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.family).unwrap()
self.device
.physical_device()
.queue_family_by_id(self.family)
.unwrap()
}
/// Returns the index of this queue within its family.
@ -612,7 +631,7 @@ impl Queue {
unsafe {
let vk = self.device.pointers();
let queue = self.queue.lock().unwrap();
try!(check_errors(vk.QueueWaitIdle(*queue)));
check_errors(vk.QueueWaitIdle(*queue))?;
Ok(())
}
}
@ -629,12 +648,12 @@ unsafe impl SynchronizedVulkanObject for Queue {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use device::Device;
use device::DeviceCreationError;
use device::DeviceExtensions;
use features::Features;
use instance;
use std::sync::Arc;
#[test]
fn one_ref() {
@ -647,15 +666,18 @@ mod tests {
let instance = instance!();
let physical = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};
let family = physical.queue_families().next().unwrap();
let queues = (0 .. family.queues_count() + 1).map(|_| (family, 1.0));
match Device::new(&physical, &Features::none(), &DeviceExtensions::none(), queues) {
match Device::new(&physical,
&Features::none(),
&DeviceExtensions::none(),
queues) {
Err(DeviceCreationError::TooManyQueuesForFamily) => return, // Success
_ => panic!()
_ => panic!(),
};
}
@ -664,7 +686,7 @@ mod tests {
let instance = instance!();
let physical = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};
let family = physical.queue_families().next().unwrap();
@ -675,9 +697,12 @@ mod tests {
return;
}
match Device::new(&physical, &features, &DeviceExtensions::none(), Some((family, 1.0))) {
match Device::new(&physical,
&features,
&DeviceExtensions::none(),
Some((family, 1.0))) {
Err(DeviceCreationError::FeatureNotPresent) => return, // Success
_ => panic!()
_ => panic!(),
};
}
@ -686,23 +711,25 @@ mod tests {
let instance = instance!();
let physical = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};
let family = physical.queue_families().next().unwrap();
match Device::new(&physical, &Features::none(),
&DeviceExtensions::none(), Some((family, 1.4)))
{
match Device::new(&physical,
&Features::none(),
&DeviceExtensions::none(),
Some((family, 1.4))) {
Err(DeviceCreationError::PriorityOutOfRange) => (), // Success
_ => panic!()
_ => panic!(),
};
match Device::new(&physical, &Features::none(),
&DeviceExtensions::none(), Some((family, -0.2)))
{
match Device::new(&physical,
&Features::none(),
&DeviceExtensions::none(),
Some((family, -0.2))) {
Err(DeviceCreationError::PriorityOutOfRange) => (), // Success
_ => panic!()
_ => panic!(),
};
}
}

View File

@ -49,11 +49,11 @@
//! conversion.
//!
//! # Choosing a format
//!
//!
//! The following formats are guaranteed to be supported for everything that is related to
//! texturing (ie. blitting source and sampling them linearly). You should choose one of these
//! formats if you have an image that you are going to sample from:
//!
//!
//! // TODO: use vulkano enums
//! - B4G4R4A4_UNORM_PACK16
//! - R5G6B5_UNORM_PACK16
@ -101,6 +101,7 @@
//!
//! // TODO: storage formats
//!
use std::vec::IntoIter as VecIntoIter;
use vk;
@ -120,11 +121,15 @@ pub unsafe trait Data {
// TODO: that's just an example ; implement for all common data types
unsafe impl Data for i8 {
#[inline]
fn ty() -> Format { Format::R8Sint }
fn ty() -> Format {
Format::R8Sint
}
}
unsafe impl Data for u8 {
#[inline]
fn ty() -> Format { Format::R8Uint }
fn ty() -> Format {
Format::R8Uint
}
}
macro_rules! formats {
@ -572,7 +577,7 @@ unsafe impl FormatDesc for Format {
(FormatTy::Depth, f @ ClearValue::Depth(_)) => f,
(FormatTy::Stencil, f @ ClearValue::Stencil(_)) => f,
(FormatTy::DepthStencil, f @ ClearValue::DepthStencil(_)) => f,
_ => panic!("Wrong clear value")
_ => panic!("Wrong clear value"),
}
}
}
@ -585,7 +590,9 @@ pub unsafe trait PossibleFloatFormatDesc: FormatDesc {
unsafe impl PossibleFloatFormatDesc for Format {
#[inline]
fn is_float(&self) -> bool { self.ty() == FormatTy::Float }
fn is_float(&self) -> bool {
self.ty() == FormatTy::Float
}
}
pub unsafe trait PossibleUintFormatDesc: FormatDesc {
@ -594,7 +601,9 @@ pub unsafe trait PossibleUintFormatDesc: FormatDesc {
unsafe impl PossibleUintFormatDesc for Format {
#[inline]
fn is_uint(&self) -> bool { self.ty() == FormatTy::Uint }
fn is_uint(&self) -> bool {
self.ty() == FormatTy::Uint
}
}
pub unsafe trait PossibleSintFormatDesc: FormatDesc {
@ -603,7 +612,9 @@ pub unsafe trait PossibleSintFormatDesc: FormatDesc {
unsafe impl PossibleSintFormatDesc for Format {
#[inline]
fn is_sint(&self) -> bool { self.ty() == FormatTy::Sint }
fn is_sint(&self) -> bool {
self.ty() == FormatTy::Sint
}
}
pub unsafe trait PossibleDepthFormatDesc: FormatDesc {
@ -612,7 +623,9 @@ pub unsafe trait PossibleDepthFormatDesc: FormatDesc {
unsafe impl PossibleDepthFormatDesc for Format {
#[inline]
fn is_depth(&self) -> bool { self.ty() == FormatTy::Depth }
fn is_depth(&self) -> bool {
self.ty() == FormatTy::Depth
}
}
pub unsafe trait PossibleStencilFormatDesc: FormatDesc {
@ -621,7 +634,9 @@ pub unsafe trait PossibleStencilFormatDesc: FormatDesc {
unsafe impl PossibleStencilFormatDesc for Format {
#[inline]
fn is_stencil(&self) -> bool { self.ty() == FormatTy::Stencil }
fn is_stencil(&self) -> bool {
self.ty() == FormatTy::Stencil
}
}
pub unsafe trait PossibleDepthStencilFormatDesc: FormatDesc {
@ -630,7 +645,9 @@ pub unsafe trait PossibleDepthStencilFormatDesc: FormatDesc {
unsafe impl PossibleDepthStencilFormatDesc for Format {
#[inline]
fn is_depth_stencil(&self) -> bool { self.ty() == FormatTy::DepthStencil }
fn is_depth_stencil(&self) -> bool {
self.ty() == FormatTy::DepthStencil
}
}
pub unsafe trait PossibleCompressedFormatDesc: FormatDesc {
@ -639,7 +656,9 @@ pub unsafe trait PossibleCompressedFormatDesc: FormatDesc {
unsafe impl PossibleCompressedFormatDesc for Format {
#[inline]
fn is_compressed(&self) -> bool { self.ty() == FormatTy::Compressed }
fn is_compressed(&self) -> bool {
self.ty() == FormatTy::Compressed
}
}
/// Trait for types that can possibly describe a float or compressed attachment.
@ -723,21 +742,21 @@ impl From<[f32; 4]> for ClearValue {
impl From<[u32; 1]> for ClearValue {
#[inline]
fn from(val: [u32; 1]) -> ClearValue {
ClearValue::Uint([val[0], 0, 0, 0]) // TODO: is alpha value 0 correct?
ClearValue::Uint([val[0], 0, 0, 0]) // TODO: is alpha value 0 correct?
}
}
impl From<[u32; 2]> for ClearValue {
#[inline]
fn from(val: [u32; 2]) -> ClearValue {
ClearValue::Uint([val[0], val[1], 0, 0]) // TODO: is alpha value 0 correct?
ClearValue::Uint([val[0], val[1], 0, 0]) // TODO: is alpha value 0 correct?
}
}
impl From<[u32; 3]> for ClearValue {
#[inline]
fn from(val: [u32; 3]) -> ClearValue {
ClearValue::Uint([val[0], val[1], val[2], 0]) // TODO: is alpha value 0 correct?
ClearValue::Uint([val[0], val[1], val[2], 0]) // TODO: is alpha value 0 correct?
}
}
@ -751,21 +770,21 @@ impl From<[u32; 4]> for ClearValue {
impl From<[i32; 1]> for ClearValue {
#[inline]
fn from(val: [i32; 1]) -> ClearValue {
ClearValue::Int([val[0], 0, 0, 0]) // TODO: is alpha value 0 correct?
ClearValue::Int([val[0], 0, 0, 0]) // TODO: is alpha value 0 correct?
}
}
impl From<[i32; 2]> for ClearValue {
#[inline]
fn from(val: [i32; 2]) -> ClearValue {
ClearValue::Int([val[0], val[1], 0, 0]) // TODO: is alpha value 0 correct?
ClearValue::Int([val[0], val[1], 0, 0]) // TODO: is alpha value 0 correct?
}
}
impl From<[i32; 3]> for ClearValue {
#[inline]
fn from(val: [i32; 3]) -> ClearValue {
ClearValue::Int([val[0], val[1], val[2], 0]) // TODO: is alpha value 0 correct?
ClearValue::Int([val[0], val[1], val[2], 0]) // TODO: is alpha value 0 correct?
}
}

View File

@ -1,53 +1,57 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use SafeDeref;
use image::ImageViewAccess;
//use sync::AccessFlagBits;
//use sync::PipelineStages;
/// A list of attachments.
// TODO: rework this trait
pub unsafe trait AttachmentsList {
// TODO: meh for API
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess>;
}
unsafe impl<T> AttachmentsList for T where T: SafeDeref, T::Target: AttachmentsList {
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
(**self).as_image_view_accesses()
}
}
unsafe impl AttachmentsList for () {
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
vec![]
}
}
unsafe impl AttachmentsList for Vec<Arc<ImageViewAccess + Send + Sync>> {
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
self.iter().map(|p| &**p as &ImageViewAccess).collect()
}
}
unsafe impl<A, B> AttachmentsList for (A, B)
where A: AttachmentsList, B: ImageViewAccess
{
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
let mut list = self.0.as_image_view_accesses();
list.push(&self.1);
list
}
}
use SafeDeref;
use image::ImageViewAccess;
use std::sync::Arc;
//use sync::AccessFlagBits;
//use sync::PipelineStages;
/// A list of attachments.
// TODO: rework this trait
pub unsafe trait AttachmentsList {
// TODO: meh for API
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess>;
}
unsafe impl<T> AttachmentsList for T
where T: SafeDeref,
T::Target: AttachmentsList
{
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
(**self).as_image_view_accesses()
}
}
unsafe impl AttachmentsList for () {
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
vec![]
}
}
unsafe impl AttachmentsList for Vec<Arc<ImageViewAccess + Send + Sync>> {
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
self.iter().map(|p| &**p as &ImageViewAccess).collect()
}
}
unsafe impl<A, B> AttachmentsList for (A, B)
where A: AttachmentsList,
B: ImageViewAccess
{
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
let mut list = self.0.as_image_view_accesses();
list.push(&self.1);
list
}
}

View File

@ -10,11 +10,11 @@
//! This module contains the `ensure_image_view_compatible` function, which verifies whether
//! an image view can be used as a render pass attachment.
use std::error;
use std::fmt;
use format::Format;
use framebuffer::RenderPassDesc;
use image::ImageViewAccess;
use std::error;
use std::fmt;
/// Checks whether the given image view is allowed to be the nth attachment of the given render
/// pass.
@ -29,21 +29,22 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
where Rp: ?Sized + RenderPassDesc,
I: ?Sized + ImageViewAccess
{
let attachment_desc = render_pass.attachment_desc(attachment_num)
.expect("Attachment num out of range");
let attachment_desc = render_pass
.attachment_desc(attachment_num)
.expect("Attachment num out of range");
if image.format() != attachment_desc.format {
return Err(IncompatibleRenderPassAttachmentError::FormatMismatch {
expected: attachment_desc.format,
obtained: image.format(),
});
expected: attachment_desc.format,
obtained: image.format(),
});
}
if image.samples() != attachment_desc.samples {
return Err(IncompatibleRenderPassAttachmentError::SamplesMismatch {
expected: attachment_desc.samples,
obtained: image.samples(),
});
expected: attachment_desc.samples,
obtained: image.samples(),
});
}
if !image.identity_swizzle() {
@ -51,11 +52,16 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
}
for subpass_num in 0 .. render_pass.num_subpasses() {
let subpass = render_pass.subpass_desc(subpass_num).expect("Subpass num out of range ; \
wrong RenderPassDesc trait impl");
let subpass = render_pass
.subpass_desc(subpass_num)
.expect("Subpass num out of range ; wrong RenderPassDesc trait impl");
if subpass.color_attachments.iter().any(|&(n, _)| n == attachment_num) {
debug_assert!(image.parent().has_color()); // Was normally checked by the render pass.
if subpass
.color_attachments
.iter()
.any(|&(n, _)| n == attachment_num)
{
debug_assert!(image.parent().has_color()); // Was normally checked by the render pass.
if !image.parent().inner().usage_color_attachment() {
return Err(IncompatibleRenderPassAttachmentError::MissingColorAttachmentUsage);
}
@ -71,7 +77,11 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
}
}
if subpass.input_attachments.iter().any(|&(n, _)| n == attachment_num) {
if subpass
.input_attachments
.iter()
.any(|&(n, _)| n == attachment_num)
{
if !image.parent().inner().usage_input_attachment() {
return Err(IncompatibleRenderPassAttachmentError::MissingInputAttachmentUsage);
}
@ -158,11 +168,11 @@ impl fmt::Display for IncompatibleRenderPassAttachmentError {
#[cfg(test)]
mod tests {
use super::IncompatibleRenderPassAttachmentError;
use super::ensure_image_view_compatible;
use format::Format;
use framebuffer::EmptySinglePassRenderPassDesc;
use image::AttachmentImage;
use super::ensure_image_view_compatible;
use super::IncompatibleRenderPassAttachmentError;
#[test]
fn basic_ok() {
@ -184,7 +194,7 @@ mod tests {
).unwrap();
let img = AttachmentImage::new(device, [128, 128], Format::R8G8B8A8Unorm).unwrap();
ensure_image_view_compatible(&rp, 0, &img).unwrap();
}
@ -208,11 +218,13 @@ mod tests {
).unwrap();
let img = AttachmentImage::new(device, [128, 128], Format::R8G8B8A8Unorm).unwrap();
match ensure_image_view_compatible(&rp, 0, &img) {
Err(IncompatibleRenderPassAttachmentError::FormatMismatch {
expected: Format::R16G16Sfloat, obtained: Format::R8G8B8A8Unorm }) => (),
e => panic!("{:?}", e)
expected: Format::R16G16Sfloat,
obtained: Format::R8G8B8A8Unorm,
}) => (),
e => panic!("{:?}", e),
}
}
@ -223,7 +235,7 @@ mod tests {
let rp = EmptySinglePassRenderPassDesc;
let img = AttachmentImage::new(device, [128, 128], Format::R8G8B8A8Unorm).unwrap();
let _ = ensure_image_view_compatible(&rp, 0, &img);
}

View File

@ -14,10 +14,10 @@ use format::ClearValue;
use format::Format;
use format::FormatTy;
use framebuffer::RenderPass;
use framebuffer::RenderPassDescClearValues;
use framebuffer::RenderPassCompatible;
use framebuffer::RenderPassCreationError;
use image::ImageLayout as ImageLayout;
use framebuffer::RenderPassDescClearValues;
use image::ImageLayout;
use sync::AccessFlagBits;
use sync::PipelineStages;
@ -53,8 +53,13 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
/// Returns an iterator to the list of attachments.
#[inline]
fn attachment_descs(&self) -> RenderPassDescAttachments<Self> where Self: Sized {
RenderPassDescAttachments { render_pass: self, num: 0 }
fn attachment_descs(&self) -> RenderPassDescAttachments<Self>
where Self: Sized
{
RenderPassDescAttachments {
render_pass: self,
num: 0,
}
}
/// Returns the number of subpasses of the render pass.
@ -67,8 +72,13 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
/// Returns an iterator to the list of subpasses.
#[inline]
fn subpass_descs(&self) -> RenderPassDescSubpasses<Self> where Self: Sized {
RenderPassDescSubpasses { render_pass: self, num: 0 }
fn subpass_descs(&self) -> RenderPassDescSubpasses<Self>
where Self: Sized
{
RenderPassDescSubpasses {
render_pass: self,
num: 0,
}
}
/// Returns the number of dependencies of the render pass.
@ -81,8 +91,13 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
/// Returns an iterator to the list of dependencies.
#[inline]
fn dependency_descs(&self) -> RenderPassDescDependencies<Self> where Self: Sized {
RenderPassDescDependencies { render_pass: self, num: 0 }
fn dependency_descs(&self) -> RenderPassDescDependencies<Self>
where Self: Sized
{
RenderPassDescDependencies {
render_pass: self,
num: 0,
}
}
/// Returns true if this render pass is compatible with another render pass.
@ -114,122 +129,191 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
/// Returns the number of color attachments of a subpass. Returns `None` if out of range.
#[inline]
fn num_color_attachments(&self, subpass: u32) -> Option<u32> {
(&self).subpass_descs().skip(subpass as usize).next().map(|p| p.color_attachments.len() as u32)
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.map(|p| p.color_attachments.len() as u32)
}
/// Returns the number of samples of the attachments of a subpass. Returns `None` if out of
/// range or if the subpass has no attachment. TODO: return an enum instead?
#[inline]
fn num_samples(&self, subpass: u32) -> Option<u32> {
(&self).subpass_descs().skip(subpass as usize).next().and_then(|p| {
// TODO: chain input attachments as well?
p.color_attachments.iter().cloned().chain(p.depth_stencil.clone().into_iter())
.filter_map(|a| (&self).attachment_descs().skip(a.0).next())
.next().map(|a| a.samples)
})
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.and_then(|p| {
// TODO: chain input attachments as well?
p.color_attachments
.iter()
.cloned()
.chain(p.depth_stencil.clone().into_iter())
.filter_map(|a| (&self).attachment_descs().skip(a.0).next())
.next()
.map(|a| a.samples)
})
}
/// Returns a tuple whose first element is `true` if there's a depth attachment, and whose
/// second element is `true` if there's a stencil attachment. Returns `None` if out of range.
#[inline]
fn has_depth_stencil_attachment(&self, subpass: u32) -> Option<(bool, bool)> {
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
let atch_num = match p.depth_stencil {
Some((d, _)) => d,
None => return (false, false)
};
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.map(|p| {
let atch_num = match p.depth_stencil {
Some((d, _)) => d,
None => return (false, false),
};
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
FormatTy::Depth => (true, false),
FormatTy::Stencil => (false, true),
FormatTy::DepthStencil => (true, true),
_ => unreachable!()
}
})
match (&self)
.attachment_descs()
.skip(atch_num)
.next()
.unwrap()
.format
.ty() {
FormatTy::Depth => (true, false),
FormatTy::Stencil => (false, true),
FormatTy::DepthStencil => (true, true),
_ => unreachable!(),
}
})
}
/// Returns true if a subpass has a depth attachment or a depth-stencil attachment.
#[inline]
fn has_depth(&self, subpass: u32) -> Option<bool> {
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
let atch_num = match p.depth_stencil {
Some((d, _)) => d,
None => return false
};
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.map(|p| {
let atch_num = match p.depth_stencil {
Some((d, _)) => d,
None => return false,
};
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
FormatTy::Depth => true,
FormatTy::Stencil => false,
FormatTy::DepthStencil => true,
_ => unreachable!()
}
})
match (&self)
.attachment_descs()
.skip(atch_num)
.next()
.unwrap()
.format
.ty() {
FormatTy::Depth => true,
FormatTy::Stencil => false,
FormatTy::DepthStencil => true,
_ => unreachable!(),
}
})
}
/// Returns true if a subpass has a depth attachment or a depth-stencil attachment whose
/// layout is not `DepthStencilReadOnlyOptimal`.
#[inline]
fn has_writable_depth(&self, subpass: u32) -> Option<bool> {
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
let atch_num = match p.depth_stencil {
Some((d, l)) => {
if l == ImageLayout::DepthStencilReadOnlyOptimal { return false; }
d
},
None => return false
};
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.map(|p| {
let atch_num = match p.depth_stencil {
Some((d, l)) => {
if l == ImageLayout::DepthStencilReadOnlyOptimal {
return false;
}
d
},
None => return false,
};
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
FormatTy::Depth => true,
FormatTy::Stencil => false,
FormatTy::DepthStencil => true,
_ => unreachable!()
}
})
match (&self)
.attachment_descs()
.skip(atch_num)
.next()
.unwrap()
.format
.ty() {
FormatTy::Depth => true,
FormatTy::Stencil => false,
FormatTy::DepthStencil => true,
_ => unreachable!(),
}
})
}
/// Returns true if a subpass has a stencil attachment or a depth-stencil attachment.
#[inline]
fn has_stencil(&self, subpass: u32) -> Option<bool> {
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
let atch_num = match p.depth_stencil {
Some((d, _)) => d,
None => return false
};
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.map(|p| {
let atch_num = match p.depth_stencil {
Some((d, _)) => d,
None => return false,
};
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
FormatTy::Depth => false,
FormatTy::Stencil => true,
FormatTy::DepthStencil => true,
_ => unreachable!()
}
})
match (&self)
.attachment_descs()
.skip(atch_num)
.next()
.unwrap()
.format
.ty() {
FormatTy::Depth => false,
FormatTy::Stencil => true,
FormatTy::DepthStencil => true,
_ => unreachable!(),
}
})
}
/// Returns true if a subpass has a stencil attachment or a depth-stencil attachment whose
/// layout is not `DepthStencilReadOnlyOptimal`.
#[inline]
fn has_writable_stencil(&self, subpass: u32) -> Option<bool> {
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
let atch_num = match p.depth_stencil {
Some((d, l)) => {
if l == ImageLayout::DepthStencilReadOnlyOptimal { return false; }
d
},
None => return false
};
(&self)
.subpass_descs()
.skip(subpass as usize)
.next()
.map(|p| {
let atch_num = match p.depth_stencil {
Some((d, l)) => {
if l == ImageLayout::DepthStencilReadOnlyOptimal {
return false;
}
d
},
None => return false,
};
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
FormatTy::Depth => false,
FormatTy::Stencil => true,
FormatTy::DepthStencil => true,
_ => unreachable!()
}
})
match (&self)
.attachment_descs()
.skip(atch_num)
.next()
.unwrap()
.format
.ty() {
FormatTy::Depth => false,
FormatTy::Stencil => true,
FormatTy::DepthStencil => true,
_ => unreachable!(),
}
})
}
}
unsafe impl<T> RenderPassDesc for T where T: SafeDeref, T::Target: RenderPassDesc {
unsafe impl<T> RenderPassDesc for T
where T: SafeDeref,
T::Target: RenderPassDesc
{
#[inline]
fn num_attachments(&self) -> usize {
(**self).num_attachments()
@ -268,14 +352,18 @@ pub struct RenderPassDescAttachments<'a, R: ?Sized + 'a> {
num: usize,
}
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescAttachments<'a, R> where R: RenderPassDesc {
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescAttachments<'a, R>
where R: RenderPassDesc
{
type Item = LayoutAttachmentDescription;
fn next(&mut self) -> Option<LayoutAttachmentDescription> {
if self.num < self.render_pass.num_attachments() {
let n = self.num;
self.num += 1;
Some(self.render_pass.attachment_desc(n).expect("Wrong RenderPassDesc implementation"))
Some(self.render_pass
.attachment_desc(n)
.expect("Wrong RenderPassDesc implementation"))
} else {
None
}
@ -289,14 +377,18 @@ pub struct RenderPassDescSubpasses<'a, R: ?Sized + 'a> {
num: usize,
}
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescSubpasses<'a, R> where R: RenderPassDesc {
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescSubpasses<'a, R>
where R: RenderPassDesc
{
type Item = LayoutPassDescription;
fn next(&mut self) -> Option<LayoutPassDescription> {
if self.num < self.render_pass.num_subpasses() {
let n = self.num;
self.num += 1;
Some(self.render_pass.subpass_desc(n).expect("Wrong RenderPassDesc implementation"))
Some(self.render_pass
.subpass_desc(n)
.expect("Wrong RenderPassDesc implementation"))
} else {
None
}
@ -310,14 +402,18 @@ pub struct RenderPassDescDependencies<'a, R: ?Sized + 'a> {
num: usize,
}
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescDependencies<'a, R> where R: RenderPassDesc {
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescDependencies<'a, R>
where R: RenderPassDesc
{
type Item = LayoutPassDependencyDescription;
fn next(&mut self) -> Option<LayoutPassDependencyDescription> {
if self.num < self.render_pass.num_dependencies() {
let n = self.num;
self.num += 1;
Some(self.render_pass.dependency_desc(n).expect("Wrong RenderPassDesc implementation"))
Some(self.render_pass
.dependency_desc(n)
.expect("Wrong RenderPassDesc implementation"))
} else {
None
}
@ -389,22 +485,22 @@ impl LayoutAttachmentDescription {
#[derive(Debug, Clone)]
pub struct LayoutPassDescription {
/// Indices and layouts of attachments to use as color attachments.
pub color_attachments: Vec<(usize, ImageLayout)>, // TODO: Vec is slow
pub color_attachments: Vec<(usize, ImageLayout)>, // TODO: Vec is slow
/// Index and layout of the attachment to use as depth-stencil attachment.
pub depth_stencil: Option<(usize, ImageLayout)>,
/// Indices and layouts of attachments to use as input attachments.
pub input_attachments: Vec<(usize, ImageLayout)>, // TODO: Vec is slow
pub input_attachments: Vec<(usize, ImageLayout)>, // TODO: Vec is slow
/// If not empty, each color attachment will be resolved into each corresponding entry of
/// this list.
///
/// If this value is not empty, it **must** be the same length as `color_attachments`.
pub resolve_attachments: Vec<(usize, ImageLayout)>, // TODO: Vec is slow
pub resolve_attachments: Vec<(usize, ImageLayout)>, // TODO: Vec is slow
/// Indices of attachments that will be preserved during this pass.
pub preserve_attachments: Vec<usize>, // TODO: Vec is slow
pub preserve_attachments: Vec<usize>, // TODO: Vec is slow
}
/// Describes a dependency between two passes of a render pass.

View File

@ -7,13 +7,13 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::iter;
use format::ClearValue;
use framebuffer::LayoutAttachmentDescription;
use framebuffer::LayoutPassDependencyDescription;
use framebuffer::LayoutPassDescription;
use framebuffer::RenderPassDesc;
use framebuffer::RenderPassDescClearValues;
use framebuffer::LayoutAttachmentDescription;
use framebuffer::LayoutPassDescription;
use framebuffer::LayoutPassDependencyDescription;
use std::iter;
/// Description of an empty render pass.
///
@ -52,12 +52,12 @@ unsafe impl RenderPassDesc for EmptySinglePassRenderPassDesc {
fn subpass_desc(&self, num: usize) -> Option<LayoutPassDescription> {
if num == 0 {
Some(LayoutPassDescription {
color_attachments: vec![],
depth_stencil: None,
input_attachments: vec![],
resolve_attachments: vec![],
preserve_attachments: vec![],
})
color_attachments: vec![],
depth_stencil: None,
input_attachments: vec![],
resolve_attachments: vec![],
preserve_attachments: vec![],
})
} else {
None
}
@ -75,11 +75,7 @@ unsafe impl RenderPassDesc for EmptySinglePassRenderPassDesc {
#[inline]
fn num_color_attachments(&self, subpass: u32) -> Option<u32> {
if subpass == 0 {
Some(0)
} else {
None
}
if subpass == 0 { Some(0) } else { None }
}
#[inline]
@ -98,45 +94,29 @@ unsafe impl RenderPassDesc for EmptySinglePassRenderPassDesc {
#[inline]
fn has_depth(&self, subpass: u32) -> Option<bool> {
if subpass == 0 {
Some(false)
} else {
None
}
if subpass == 0 { Some(false) } else { None }
}
#[inline]
fn has_writable_depth(&self, subpass: u32) -> Option<bool> {
if subpass == 0 {
Some(false)
} else {
None
}
if subpass == 0 { Some(false) } else { None }
}
#[inline]
fn has_stencil(&self, subpass: u32) -> Option<bool> {
if subpass == 0 {
Some(false)
} else {
None
}
if subpass == 0 { Some(false) } else { None }
}
#[inline]
fn has_writable_stencil(&self, subpass: u32) -> Option<bool> {
if subpass == 0 {
Some(false)
} else {
None
}
if subpass == 0 { Some(false) } else { None }
}
}
unsafe impl RenderPassDescClearValues<Vec<ClearValue>> for EmptySinglePassRenderPassDesc {
#[inline]
fn convert_clear_values(&self, values: Vec<ClearValue>) -> Box<Iterator<Item = ClearValue>> {
assert!(values.is_empty()); // TODO: error instead
assert!(values.is_empty()); // TODO: error instead
Box::new(iter::empty())
}
}

View File

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::cmp;
use std::error;
use std::fmt;
@ -14,7 +15,6 @@ use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;
use device::Device;
use device::DeviceOwned;
@ -26,8 +26,8 @@ use framebuffer::LayoutAttachmentDescription;
use framebuffer::LayoutPassDependencyDescription;
use framebuffer::LayoutPassDescription;
use framebuffer::RenderPassAbstract;
use framebuffer::RenderPassDescClearValues;
use framebuffer::RenderPassDesc;
use framebuffer::RenderPassDescClearValues;
use framebuffer::RenderPassSys;
use framebuffer::ensure_image_view_compatible;
use image::ImageViewAccess;
@ -129,7 +129,10 @@ pub struct FramebufferBuilder<Rp, A> {
attachments: A,
}
impl<Rp, A> fmt::Debug for FramebufferBuilder<Rp, A> where Rp: fmt::Debug, A: fmt::Debug {
impl<Rp, A> fmt::Debug for FramebufferBuilder<Rp, A>
where Rp: fmt::Debug,
A: fmt::Debug
{
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.debug_struct("FramebufferBuilder")
@ -149,7 +152,7 @@ enum FramebufferBuilderDimensions {
impl<Rp, A> FramebufferBuilder<Rp, A>
where Rp: RenderPassAbstract,
A: AttachmentsList,
A: AttachmentsList
{
/// Appends an attachment to the prototype of the framebuffer.
///
@ -160,14 +163,14 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
{
if self.raw_ids.len() >= self.render_pass.num_attachments() {
return Err(FramebufferCreationError::AttachmentsCountMismatch {
expected: self.render_pass.num_attachments(),
obtained: self.raw_ids.len() + 1,
});
expected: self.render_pass.num_attachments(),
obtained: self.raw_ids.len() + 1,
});
}
match ensure_image_view_compatible(&self.render_pass, self.raw_ids.len(), &attachment) {
Ok(()) => (),
Err(err) => return Err(FramebufferCreationError::IncompatibleAttachment(err))
Err(err) => return Err(FramebufferCreationError::IncompatibleAttachment(err)),
};
let img_dims = attachment.dimensions();
@ -180,16 +183,16 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
},
FramebufferBuilderDimensions::AutoIdentical(Some(current)) => {
if img_dims.width() != current[0] || img_dims.height() != current[1] ||
img_dims.array_layers() != current[2]
img_dims.array_layers() != current[2]
{
return Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
expected: current,
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()]
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()],
});
}
FramebufferBuilderDimensions::AutoIdentical(Some(current))
}
},
FramebufferBuilderDimensions::AutoSmaller(None) => {
let dims = [img_dims.width(), img_dims.height(), img_dims.array_layers()];
FramebufferBuilderDimensions::AutoSmaller(Some(dims))
@ -198,38 +201,36 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
let new_dims = [
cmp::min(current[0], img_dims.width()),
cmp::min(current[1], img_dims.height()),
cmp::min(current[2], img_dims.array_layers())
cmp::min(current[2], img_dims.array_layers()),
];
FramebufferBuilderDimensions::AutoSmaller(Some(new_dims))
},
FramebufferBuilderDimensions::Specific(current) => {
if img_dims.width() < current[0] || img_dims.height() < current[1] ||
img_dims.array_layers() < current[2]
img_dims.array_layers() < current[2]
{
return Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
expected: current,
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()]
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()],
});
}
FramebufferBuilderDimensions::Specific([
img_dims.width(),
img_dims.height(),
img_dims.array_layers()
])
}
FramebufferBuilderDimensions::Specific(
[img_dims.width(), img_dims.height(), img_dims.array_layers()],
)
},
};
let mut raw_ids = self.raw_ids;
raw_ids.push(attachment.inner().internal_object());
Ok(FramebufferBuilder {
render_pass: self.render_pass,
raw_ids: raw_ids,
dimensions: dimensions,
attachments: (self.attachments, attachment),
})
render_pass: self.render_pass,
raw_ids: raw_ids,
dimensions: dimensions,
attachments: (self.attachments, attachment),
})
}
/// Turns this builder into a `FramebufferBuilder<Rp, Box<AttachmentsList>>`.
@ -258,9 +259,9 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
// Check the number of attachments.
if self.raw_ids.len() != self.render_pass.num_attachments() {
return Err(FramebufferCreationError::AttachmentsCountMismatch {
expected: self.render_pass.num_attachments(),
obtained: self.raw_ids.len(),
});
expected: self.render_pass.num_attachments(),
obtained: self.raw_ids.len(),
});
}
// Compute the dimensions.
@ -279,11 +280,12 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
// Checking the dimensions against the limits.
{
let limits = device.physical_device().limits();
let limits = [limits.max_framebuffer_width(), limits.max_framebuffer_height(),
limits.max_framebuffer_layers()];
if dimensions[0] > limits[0] || dimensions[1] > limits[1] ||
dimensions[2] > limits[2]
{
let limits = [
limits.max_framebuffer_width(),
limits.max_framebuffer_height(),
limits.max_framebuffer_layers(),
];
if dimensions[0] > limits[0] || dimensions[1] > limits[1] || dimensions[2] > limits[2] {
return Err(FramebufferCreationError::DimensionsTooLarge);
}
}
@ -294,7 +296,7 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
let infos = vk::FramebufferCreateInfo {
sType: vk::STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
renderPass: self.render_pass.inner().internal_object(),
attachmentCount: self.raw_ids.len() as u32,
pAttachments: self.raw_ids.as_ptr(),
@ -304,18 +306,20 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateFramebuffer(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateFramebuffer(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(Framebuffer {
device: device,
render_pass: self.render_pass,
framebuffer: framebuffer,
dimensions: dimensions,
resources: self.attachments,
})
device: device,
render_pass: self.render_pass,
framebuffer: framebuffer,
dimensions: dimensions,
resources: self.attachments,
})
}
}
@ -377,7 +381,9 @@ unsafe impl<Rp, A> FramebufferAbstract for Framebuffer<Rp, A>
}
}
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A> where Rp: RenderPassDesc {
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A>
where Rp: RenderPassDesc
{
#[inline]
fn num_attachments(&self) -> usize {
self.render_pass.num_attachments()
@ -392,7 +398,7 @@ unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A> where Rp: RenderPassDes
fn num_subpasses(&self) -> usize {
self.render_pass.num_subpasses()
}
#[inline]
fn subpass_desc(&self, num: usize) -> Option<LayoutPassDescription> {
self.render_pass.subpass_desc(num)
@ -418,7 +424,9 @@ unsafe impl<C, Rp, A> RenderPassDescClearValues<C> for Framebuffer<Rp, A>
}
}
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp, A> where Rp: RenderPassAbstract {
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp, A>
where Rp: RenderPassAbstract
{
#[inline]
fn inner(&self) -> RenderPassSys {
self.render_pass.inner()
@ -494,8 +502,8 @@ impl error::Error for FramebufferCreationError {
fn description(&self) -> &str {
match *self {
FramebufferCreationError::OomError(_) => "no memory available",
FramebufferCreationError::DimensionsTooLarge => "the dimensions of the framebuffer \
are too large",
FramebufferCreationError::DimensionsTooLarge =>
"the dimensions of the framebuffer are too large",
FramebufferCreationError::AttachmentDimensionsIncompatible { .. } => {
"the attachment has a size that isn't compatible with the framebuffer dimensions"
},
@ -537,19 +545,20 @@ impl From<Error> for FramebufferCreationError {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use format::Format;
use framebuffer::EmptySinglePassRenderPassDesc;
use framebuffer::Framebuffer;
use framebuffer::FramebufferCreationError;
use framebuffer::RenderPassDesc;
use image::attachment::AttachmentImage;
use std::sync::Arc;
#[test]
fn simple_create() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
@ -562,22 +571,29 @@ mod tests {
color: [color],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let image = AttachmentImage::new(device.clone(), [1024, 768],
Format::R8G8B8A8Unorm).unwrap();
let _ = Framebuffer::start(render_pass).add(image.clone()).unwrap().build().unwrap();
let image = AttachmentImage::new(device.clone(), [1024, 768], Format::R8G8B8A8Unorm)
.unwrap();
let _ = Framebuffer::start(render_pass)
.add(image.clone())
.unwrap()
.build()
.unwrap();
}
#[test]
fn check_device_limits() {
let (device, _) = gfx_dev_and_queue!();
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
let rp = EmptySinglePassRenderPassDesc
.build_render_pass(device)
.unwrap();
let res = Framebuffer::with_dimensions(rp, [0xffffffff, 0xffffffff, 0xffffffff]).build();
match res {
Err(FramebufferCreationError::DimensionsTooLarge) => (),
_ => panic!()
_ => panic!(),
}
}
@ -585,7 +601,8 @@ mod tests {
fn attachment_format_mismatch() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
@ -598,14 +615,14 @@ mod tests {
color: [color],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let image = AttachmentImage::new(device.clone(), [1024, 768],
Format::R8Unorm).unwrap();
let image = AttachmentImage::new(device.clone(), [1024, 768], Format::R8Unorm).unwrap();
match Framebuffer::start(render_pass).add(image.clone()) {
Err(FramebufferCreationError::IncompatibleAttachment(_)) => (),
_ => panic!()
_ => panic!(),
}
}
@ -614,8 +631,9 @@ mod tests {
#[test]
fn attachment_dims_larger_than_specified_valid() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
@ -628,20 +646,24 @@ mod tests {
color: [color],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let img = AttachmentImage::new(device.clone(), [600, 600], Format::R8G8B8A8Unorm).unwrap();
let _ = Framebuffer::with_dimensions(render_pass, [512, 512, 1])
.add(img).unwrap()
.build().unwrap();
.add(img)
.unwrap()
.build()
.unwrap();
}
#[test]
fn attachment_dims_smaller_than_specified() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
@ -654,24 +676,29 @@ mod tests {
color: [color],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let img = AttachmentImage::new(device.clone(), [512, 700], Format::R8G8B8A8Unorm).unwrap();
match Framebuffer::with_dimensions(render_pass, [600, 600, 1]).add(img) {
Err(FramebufferCreationError::AttachmentDimensionsIncompatible { expected, obtained }) => {
Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
expected,
obtained,
}) => {
assert_eq!(expected, [600, 600, 1]);
assert_eq!(obtained, [512, 700, 1]);
},
_ => panic!()
_ => panic!(),
}
}
#[test]
fn multi_attachments_dims_not_identical() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
a: {
load: Clear,
@ -690,25 +717,30 @@ mod tests {
color: [a, b],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let a = AttachmentImage::new(device.clone(), [512, 512], Format::R8G8B8A8Unorm).unwrap();
let b = AttachmentImage::new(device.clone(), [512, 513], Format::R8G8B8A8Unorm).unwrap();
match Framebuffer::start(render_pass).add(a).unwrap().add(b) {
Err(FramebufferCreationError::AttachmentDimensionsIncompatible { expected, obtained }) => {
Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
expected,
obtained,
}) => {
assert_eq!(expected, [512, 512, 1]);
assert_eq!(obtained, [512, 513, 1]);
},
_ => panic!()
_ => panic!(),
}
}
#[test]
fn multi_attachments_auto_smaller() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
a: {
load: Clear,
@ -727,27 +759,32 @@ mod tests {
color: [a, b],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let a = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
let b = AttachmentImage::new(device.clone(), [512, 128], Format::R8G8B8A8Unorm).unwrap();
let fb = Framebuffer::with_intersecting_dimensions(render_pass)
.add(a).unwrap()
.add(b).unwrap()
.build().unwrap();
.add(a)
.unwrap()
.add(b)
.unwrap()
.build()
.unwrap();
match (fb.width(), fb.height(), fb.layers()) {
(256, 128, 1) => (),
_ => panic!()
_ => panic!(),
}
}
#[test]
fn not_enough_attachments() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
a: {
load: Clear,
@ -766,26 +803,31 @@ mod tests {
color: [a, b],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let img = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
let res = Framebuffer::with_intersecting_dimensions(render_pass)
.add(img).unwrap()
.add(img)
.unwrap()
.build();
match res {
Err(FramebufferCreationError::AttachmentsCountMismatch { expected: 2,
obtained: 1 }) => (),
_ => panic!()
Err(FramebufferCreationError::AttachmentsCountMismatch {
expected: 2,
obtained: 1,
}) => (),
_ => panic!(),
}
}
#[test]
fn too_many_attachments() {
let (device, _) = gfx_dev_and_queue!();
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
let render_pass = Arc::new(
single_pass_renderpass!(device.clone(),
attachments: {
a: {
load: Clear,
@ -798,19 +840,23 @@ mod tests {
color: [a],
depth_stencil: {}
}
).unwrap());
).unwrap(),
);
let a = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
let b = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
let res = Framebuffer::with_intersecting_dimensions(render_pass)
.add(a).unwrap()
.add(a)
.unwrap()
.add(b);
match res {
Err(FramebufferCreationError::AttachmentsCountMismatch { expected: 1,
obtained: 2 }) => (),
_ => panic!()
Err(FramebufferCreationError::AttachmentsCountMismatch {
expected: 1,
obtained: 2,
}) => (),
_ => panic!(),
}
}
@ -818,19 +864,25 @@ mod tests {
fn empty_working() {
let (device, _) = gfx_dev_and_queue!();
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
let _ = Framebuffer::with_dimensions(rp, [512, 512, 1]).build().unwrap();
let rp = EmptySinglePassRenderPassDesc
.build_render_pass(device)
.unwrap();
let _ = Framebuffer::with_dimensions(rp, [512, 512, 1])
.build()
.unwrap();
}
#[test]
fn cant_determine_dimensions_auto() {
let (device, _) = gfx_dev_and_queue!();
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
let rp = EmptySinglePassRenderPassDesc
.build_render_pass(device)
.unwrap();
let res = Framebuffer::start(rp).build();
match res {
Err(FramebufferCreationError::CantDetermineDimensions) => (),
_ => panic!()
_ => panic!(),
}
}
@ -838,11 +890,13 @@ mod tests {
fn cant_determine_dimensions_intersect() {
let (device, _) = gfx_dev_and_queue!();
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
let rp = EmptySinglePassRenderPassDesc
.build_render_pass(device)
.unwrap();
let res = Framebuffer::with_intersecting_dimensions(rp).build();
match res {
Err(FramebufferCreationError::CantDetermineDimensions) => (),
_ => panic!()
_ => panic!(),
}
}
}

View File

@ -8,11 +8,11 @@
// according to those terms.
//! Targets on which your draw commands are executed.
//!
//!
//! # Render passes and framebuffers
//!
//! There are two concepts in Vulkan:
//!
//!
//! - A *render pass* describes the target which you are going to render to. It is a collection
//! of descriptions of one or more attachments (ie. image that are rendered to), and of one or
//! multiples subpasses. The render pass contains the format and number of samples of each
@ -91,17 +91,17 @@
//!
pub use self::attachments_list::AttachmentsList;
pub use self::compat_atch::ensure_image_view_compatible;
pub use self::compat_atch::IncompatibleRenderPassAttachmentError;
pub use self::compat_atch::ensure_image_view_compatible;
pub use self::desc::LayoutAttachmentDescription;
pub use self::desc::LayoutPassDescription;
pub use self::desc::LayoutPassDependencyDescription;
pub use self::desc::LayoutPassDescription;
pub use self::desc::LoadOp;
pub use self::desc::RenderPassDesc;
pub use self::desc::RenderPassDescAttachments;
pub use self::desc::RenderPassDescSubpasses;
pub use self::desc::RenderPassDescDependencies;
pub use self::desc::RenderPassDescSubpasses;
pub use self::desc::StoreOp;
pub use self::desc::LoadOp;
pub use self::empty::EmptySinglePassRenderPassDesc;
pub use self::framebuffer::Framebuffer;
pub use self::framebuffer::FramebufferBuilder;
@ -111,9 +111,9 @@ pub use self::sys::RenderPass;
pub use self::sys::RenderPassCreationError;
pub use self::sys::RenderPassSys;
pub use self::traits::FramebufferAbstract;
pub use self::traits::RenderPassDescClearValues;
pub use self::traits::RenderPassCompatible;
pub use self::traits::RenderPassAbstract;
pub use self::traits::RenderPassCompatible;
pub use self::traits::RenderPassDescClearValues;
pub use self::traits::RenderPassSubpassInterface;
pub use self::traits::Subpass;

View File

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::marker::PhantomData;
@ -14,7 +15,6 @@ use std::mem;
use std::ptr;
use std::sync::Arc;
use std::sync::Mutex;
use smallvec::SmallVec;
use device::Device;
use device::DeviceOwned;
@ -24,9 +24,9 @@ use framebuffer::LayoutAttachmentDescription;
use framebuffer::LayoutPassDependencyDescription;
use framebuffer::LayoutPassDescription;
use framebuffer::LoadOp;
use framebuffer::RenderPassDescClearValues;
use framebuffer::RenderPassDesc;
use framebuffer::RenderPassAbstract;
use framebuffer::RenderPassDesc;
use framebuffer::RenderPassDescClearValues;
use Error;
use OomError;
@ -52,7 +52,9 @@ pub struct RenderPass<D> {
granularity: Mutex<Option<[u32; 2]>>,
}
impl<D> RenderPass<D> where D: RenderPassDesc {
impl<D> RenderPass<D>
where D: RenderPassDesc
{
/// Builds a new render pass.
///
/// # Panic
@ -62,42 +64,61 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
/// mode.
///
pub fn new(device: Arc<Device>, description: D)
-> Result<RenderPass<D>, RenderPassCreationError>
{
-> Result<RenderPass<D>, RenderPassCreationError> {
let vk = device.pointers();
// If the first use of an attachment in this render pass is as an input attachment, and
// the attachment is not also used as a color or depth/stencil attachment in the same
// subpass, then loadOp must not be VK_ATTACHMENT_LOAD_OP_CLEAR
debug_assert!(description.attachment_descs().enumerate().all(|(atch_num, attachment)| {
debug_assert!(description.attachment_descs().enumerate().all(|(atch_num,
attachment)| {
if attachment.load != LoadOp::Clear {
return true;
}
for p in description.subpass_descs() {
if p.color_attachments.iter().find(|&&(a, _)| a == atch_num).is_some() { return true; }
if let Some((a, _)) = p.depth_stencil { if a == atch_num { return true; } }
if p.input_attachments.iter().find(|&&(a, _)| a == atch_num).is_some() { return false; }
if p.color_attachments
.iter()
.find(|&&(a, _)| a == atch_num)
.is_some()
{
return true;
}
if let Some((a, _)) = p.depth_stencil {
if a == atch_num {
return true;
}
}
if p.input_attachments
.iter()
.find(|&&(a, _)| a == atch_num)
.is_some()
{
return false;
}
}
true
}));
let attachments = description.attachment_descs().map(|attachment| {
debug_assert!(attachment.samples.is_power_of_two());
let attachments = description
.attachment_descs()
.map(|attachment| {
debug_assert!(attachment.samples.is_power_of_two());
vk::AttachmentDescription {
flags: 0, // FIXME: may alias flag
format: attachment.format as u32,
samples: attachment.samples,
loadOp: attachment.load as u32,
storeOp: attachment.store as u32,
stencilLoadOp: attachment.stencil_load as u32,
stencilStoreOp: attachment.stencil_store as u32,
initialLayout: attachment.initial_layout as u32,
finalLayout: attachment.final_layout as u32,
}
}).collect::<SmallVec<[_; 16]>>();
vk::AttachmentDescription {
flags: 0, // FIXME: may alias flag
format: attachment.format as u32,
samples: attachment.samples,
loadOp: attachment.load as u32,
storeOp: attachment.store as u32,
stencilLoadOp: attachment.stencil_load as u32,
stencilStoreOp: attachment.stencil_store as u32,
initialLayout: attachment.initial_layout as u32,
finalLayout: attachment.final_layout as u32,
}
})
.collect::<SmallVec<[_; 16]>>();
// We need to pass pointers to vkAttachmentReference structs when creating the render pass.
// Therefore we need to allocate them in advance.
@ -105,71 +126,102 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
// This block allocates, for each pass, in order, all color attachment references, then all
// input attachment references, then all resolve attachment references, then the depth
// stencil attachment reference.
let attachment_references = description.subpass_descs().flat_map(|pass| {
// Performing some validation with debug asserts.
debug_assert!(pass.resolve_attachments.is_empty() ||
pass.resolve_attachments.len() == pass.color_attachments.len());
debug_assert!(pass.resolve_attachments.iter().all(|a| {
attachments[a.0].samples == 1
}));
debug_assert!(pass.resolve_attachments.is_empty() ||
pass.color_attachments.iter().all(|a| {
attachments[a.0].samples > 1
}));
debug_assert!(pass.resolve_attachments.is_empty() ||
pass.resolve_attachments.iter().zip(pass.color_attachments.iter())
.all(|(r, c)| {
attachments[r.0].format == attachments[c.0].format
}));
debug_assert!(pass.color_attachments.iter().cloned()
.chain(pass.depth_stencil.clone().into_iter())
.chain(pass.input_attachments.iter().cloned())
.chain(pass.resolve_attachments.iter().cloned())
.all(|(a, _)| {
pass.preserve_attachments.iter().find(|&&b| a == b).is_none()
}));
debug_assert!(pass.color_attachments.iter().cloned()
.chain(pass.depth_stencil.clone().into_iter())
.all(|(atch, layout)| {
if let Some(r) = pass.input_attachments.iter()
.find(|r| r.0 == atch)
{
r.1 == layout
} else {
true
}
}));
let attachment_references = description
.subpass_descs()
.flat_map(|pass| {
// Performing some validation with debug asserts.
debug_assert!(pass.resolve_attachments.is_empty() ||
pass.resolve_attachments.len() == pass.color_attachments.len());
debug_assert!(pass.resolve_attachments
.iter()
.all(|a| attachments[a.0].samples == 1));
debug_assert!(pass.resolve_attachments.is_empty() ||
pass.color_attachments
.iter()
.all(|a| attachments[a.0].samples > 1));
debug_assert!(pass.resolve_attachments.is_empty() ||
pass.resolve_attachments
.iter()
.zip(pass.color_attachments.iter())
.all(|(r, c)| {
attachments[r.0].format == attachments[c.0].format
}));
debug_assert!(pass.color_attachments
.iter()
.cloned()
.chain(pass.depth_stencil.clone().into_iter())
.chain(pass.input_attachments.iter().cloned())
.chain(pass.resolve_attachments.iter().cloned())
.all(|(a, _)| {
pass.preserve_attachments
.iter()
.find(|&&b| a == b)
.is_none()
}));
debug_assert!(
pass.color_attachments
.iter()
.cloned()
.chain(pass.depth_stencil.clone().into_iter())
.all(|(atch, layout)| if let Some(r) =
pass.input_attachments.iter().find(|r| r.0 == atch)
{
r.1 == layout
} else {
true
})
);
let resolve = pass.resolve_attachments.into_iter().map(|(offset, img_la)| {
debug_assert!(offset < attachments.len());
vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, }
});
let resolve = pass.resolve_attachments
.into_iter()
.map(|(offset, img_la)| {
debug_assert!(offset < attachments.len());
vk::AttachmentReference {
attachment: offset as u32,
layout: img_la as u32,
}
});
let color = pass.color_attachments.into_iter().map(|(offset, img_la)| {
debug_assert!(offset < attachments.len());
vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, }
});
let color = pass.color_attachments.into_iter().map(|(offset, img_la)| {
debug_assert!(offset < attachments.len());
vk::AttachmentReference {
attachment: offset as u32,
layout: img_la as u32,
}
});
let input = pass.input_attachments.into_iter().map(|(offset, img_la)| {
debug_assert!(offset < attachments.len());
vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, }
});
let input = pass.input_attachments.into_iter().map(|(offset, img_la)| {
debug_assert!(offset < attachments.len());
vk::AttachmentReference {
attachment: offset as u32,
layout: img_la as u32,
}
});
let depthstencil = if let Some((offset, img_la)) = pass.depth_stencil {
Some(vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, })
} else {
None
}.into_iter();
let depthstencil = if let Some((offset, img_la)) = pass.depth_stencil {
Some(vk::AttachmentReference {
attachment: offset as u32,
layout: img_la as u32,
})
} else {
None
}.into_iter();
color.chain(input).chain(resolve).chain(depthstencil)
}).collect::<SmallVec<[_; 16]>>();
color.chain(input).chain(resolve).chain(depthstencil)
})
.collect::<SmallVec<[_; 16]>>();
// Same as `attachment_references` but only for the preserve attachments.
// This is separate because attachment references are u32s and not `vkAttachmentReference`
// structs.
let preserve_attachments_references = description.subpass_descs().flat_map(|pass| {
pass.preserve_attachments.into_iter().map(|offset| offset as u32)
}).collect::<SmallVec<[_; 16]>>();
let preserve_attachments_references = description
.subpass_descs()
.flat_map(|pass| {
pass.preserve_attachments
.into_iter()
.map(|offset| offset as u32)
})
.collect::<SmallVec<[_; 16]>>();
// Now iterating over passes.
let passes = unsafe {
@ -182,7 +234,7 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
for pass in description.subpass_descs() {
if pass.color_attachments.len() as u32 >
device.physical_device().limits().max_color_attachments()
device.physical_device().limits().max_color_attachments()
{
return Err(RenderPassCreationError::ColorAttachmentsLimitExceeded);
}
@ -201,26 +253,39 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
ptr::null()
};
let preserve_attachments = preserve_attachments_references.as_ptr()
.offset(preserve_ref_index as isize);
let preserve_attachments = preserve_attachments_references
.as_ptr()
.offset(preserve_ref_index as isize);
preserve_ref_index += pass.preserve_attachments.len();
out.push(vk::SubpassDescription {
flags: 0, // reserved
pipelineBindPoint: vk::PIPELINE_BIND_POINT_GRAPHICS,
inputAttachmentCount: pass.input_attachments.len() as u32,
pInputAttachments: if pass.input_attachments.is_empty() { ptr::null() }
else { input_attachments },
colorAttachmentCount: pass.color_attachments.len() as u32,
pColorAttachments: if pass.color_attachments.is_empty() { ptr::null() }
else { color_attachments },
pResolveAttachments: if pass.resolve_attachments.is_empty() { ptr::null() }
else { resolve_attachments },
pDepthStencilAttachment: depth_stencil,
preserveAttachmentCount: pass.preserve_attachments.len() as u32,
pPreserveAttachments: if pass.preserve_attachments.is_empty() { ptr::null() }
else { preserve_attachments },
});
flags: 0, // reserved
pipelineBindPoint: vk::PIPELINE_BIND_POINT_GRAPHICS,
inputAttachmentCount: pass.input_attachments.len() as u32,
pInputAttachments: if pass.input_attachments.is_empty() {
ptr::null()
} else {
input_attachments
},
colorAttachmentCount: pass.color_attachments.len() as u32,
pColorAttachments: if pass.color_attachments.is_empty() {
ptr::null()
} else {
color_attachments
},
pResolveAttachments: if pass.resolve_attachments.is_empty() {
ptr::null()
} else {
resolve_attachments
},
pDepthStencilAttachment: depth_stencil,
preserveAttachmentCount: pass.preserve_attachments.len() as u32,
pPreserveAttachments: if pass.preserve_attachments.is_empty() {
ptr::null()
} else {
preserve_attachments
},
});
}
assert!(!out.is_empty());
@ -231,48 +296,67 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
out
};
let dependencies = description.dependency_descs().map(|dependency| {
debug_assert!(dependency.source_subpass < passes.len());
debug_assert!(dependency.destination_subpass < passes.len());
let dependencies = description
.dependency_descs()
.map(|dependency| {
debug_assert!(dependency.source_subpass < passes.len());
debug_assert!(dependency.destination_subpass < passes.len());
vk::SubpassDependency {
srcSubpass: dependency.source_subpass as u32,
dstSubpass: dependency.destination_subpass as u32,
srcStageMask: dependency.src_stages.into(),
dstStageMask: dependency.dst_stages.into(),
srcAccessMask: dependency.src_access.into(),
dstAccessMask: dependency.dst_access.into(),
dependencyFlags: if dependency.by_region { vk::DEPENDENCY_BY_REGION_BIT } else { 0 },
}
}).collect::<SmallVec<[_; 16]>>();
vk::SubpassDependency {
srcSubpass: dependency.source_subpass as u32,
dstSubpass: dependency.destination_subpass as u32,
srcStageMask: dependency.src_stages.into(),
dstStageMask: dependency.dst_stages.into(),
srcAccessMask: dependency.src_access.into(),
dstAccessMask: dependency.dst_access.into(),
dependencyFlags: if dependency.by_region {
vk::DEPENDENCY_BY_REGION_BIT
} else {
0
},
}
})
.collect::<SmallVec<[_; 16]>>();
let render_pass = unsafe {
let infos = vk::RenderPassCreateInfo {
sType: vk::STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
attachmentCount: attachments.len() as u32,
pAttachments: if attachments.is_empty() { ptr::null() }
else { attachments.as_ptr() },
pAttachments: if attachments.is_empty() {
ptr::null()
} else {
attachments.as_ptr()
},
subpassCount: passes.len() as u32,
pSubpasses: if passes.is_empty() { ptr::null() } else { passes.as_ptr() },
pSubpasses: if passes.is_empty() {
ptr::null()
} else {
passes.as_ptr()
},
dependencyCount: dependencies.len() as u32,
pDependencies: if dependencies.is_empty() { ptr::null() }
else { dependencies.as_ptr() },
pDependencies: if dependencies.is_empty() {
ptr::null()
} else {
dependencies.as_ptr()
},
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateRenderPass(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateRenderPass(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(RenderPass {
device: device.clone(),
render_pass: render_pass,
desc: description,
granularity: Mutex::new(None),
})
device: device.clone(),
render_pass: render_pass,
desc: description,
granularity: Mutex::new(None),
})
}
}
@ -281,9 +365,9 @@ impl RenderPass<EmptySinglePassRenderPassDesc> {
///
/// This method is useful for quick tests.
#[inline]
pub fn empty_single_pass(device: Arc<Device>)
-> Result<RenderPass<EmptySinglePassRenderPassDesc>, RenderPassCreationError>
{
pub fn empty_single_pass(
device: Arc<Device>)
-> Result<RenderPass<EmptySinglePassRenderPassDesc>, RenderPassCreationError> {
RenderPass::new(device, EmptySinglePassRenderPassDesc)
}
}
@ -304,8 +388,7 @@ impl<D> RenderPass<D> {
unsafe {
let vk = self.device.pointers();
let mut out = mem::uninitialized();
vk.GetRenderAreaGranularity(self.device.internal_object(),
self.render_pass, &mut out);
vk.GetRenderAreaGranularity(self.device.internal_object(), self.render_pass, &mut out);
debug_assert_ne!(out.width, 0);
debug_assert_ne!(out.height, 0);
@ -325,12 +408,14 @@ impl<D> RenderPass<D> {
}
}
unsafe impl<D> RenderPassDesc for RenderPass<D> where D: RenderPassDesc {
unsafe impl<D> RenderPassDesc for RenderPass<D>
where D: RenderPassDesc
{
#[inline]
fn num_attachments(&self) -> usize {
self.desc.num_attachments()
}
#[inline]
fn attachment_desc(&self, num: usize) -> Option<LayoutAttachmentDescription> {
self.desc.attachment_desc(num)
@ -340,7 +425,7 @@ unsafe impl<D> RenderPassDesc for RenderPass<D> where D: RenderPassDesc {
fn num_subpasses(&self) -> usize {
self.desc.num_subpasses()
}
#[inline]
fn subpass_desc(&self, num: usize) -> Option<LayoutPassDescription> {
self.desc.subpass_desc(num)
@ -366,7 +451,9 @@ unsafe impl<C, D> RenderPassDescClearValues<C> for RenderPass<D>
}
}
unsafe impl<D> RenderPassAbstract for RenderPass<D> where D: RenderPassDesc {
unsafe impl<D> RenderPassAbstract for RenderPass<D>
where D: RenderPassDesc
{
#[inline]
fn inner(&self) -> RenderPassSys {
RenderPassSys(self.render_pass, PhantomData)
@ -380,7 +467,9 @@ unsafe impl<D> DeviceOwned for RenderPass<D> {
}
}
impl<D> fmt::Debug for RenderPass<D> where D: fmt::Debug {
impl<D> fmt::Debug for RenderPass<D>
where D: fmt::Debug
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.debug_struct("RenderPass")
.field("raw", &self.render_pass)
@ -437,7 +526,7 @@ impl error::Error for RenderPassCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
RenderPassCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -466,7 +555,7 @@ impl From<Error> for RenderPassCreationError {
err @ Error::OutOfDeviceMemory => {
RenderPassCreationError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -488,10 +577,11 @@ mod tests {
let (device, _) = gfx_dev_and_queue!();
if device.physical_device().limits().max_color_attachments() >= 10 {
return; // test ignored
return; // test ignored
}
let rp = single_pass_renderpass! {
let rp =
single_pass_renderpass! {
device.clone(),
attachments: {
a1: { load: Clear, store: DontCare, format: Format::R8G8B8A8Unorm, samples: 1, },
@ -513,7 +603,7 @@ mod tests {
match rp {
Err(RenderPassCreationError::ColorAttachmentsLimitExceeded) => (),
_ => panic!()
_ => panic!(),
}
}

View File

@ -51,7 +51,10 @@ pub unsafe trait FramebufferAbstract: RenderPassAbstract {
}
}
unsafe impl<T> FramebufferAbstract for T where T: SafeDeref, T::Target: FramebufferAbstract {
unsafe impl<T> FramebufferAbstract for T
where T: SafeDeref,
T::Target: FramebufferAbstract
{
#[inline]
fn inner(&self) -> FramebufferSys {
FramebufferAbstract::inner(&**self)
@ -99,7 +102,10 @@ pub unsafe trait RenderPassAbstract: DeviceOwned + RenderPassDesc {
fn inner(&self) -> RenderPassSys;
}
unsafe impl<T> RenderPassAbstract for T where T: SafeDeref, T::Target: RenderPassAbstract {
unsafe impl<T> RenderPassAbstract for T
where T: SafeDeref,
T::Target: RenderPassAbstract
{
#[inline]
fn inner(&self) -> RenderPassSys {
(**self).inner()
@ -137,7 +143,8 @@ pub unsafe trait RenderPassDescClearValues<C> {
}
unsafe impl<T, C> RenderPassDescClearValues<C> for T
where T: SafeDeref, T::Target: RenderPassDescClearValues<C>
where T: SafeDeref,
T::Target: RenderPassDescClearValues<C>
{
#[inline]
fn convert_clear_values(&self, vals: C) -> Box<Iterator<Item = ClearValue>> {
@ -164,10 +171,13 @@ pub unsafe trait RenderPassSubpassInterface<Other: ?Sized>: RenderPassDesc
}
unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
where A: RenderPassDesc, B: ShaderInterfaceDef
where A: RenderPassDesc,
B: ShaderInterfaceDef
{
fn is_compatible_with(&self, subpass: u32, other: &B) -> bool {
let pass_descr = match RenderPassDesc::subpass_descs(self).skip(subpass as usize).next() {
let pass_descr = match RenderPassDesc::subpass_descs(self)
.skip(subpass as usize)
.next() {
Some(s) => s,
None => return false,
};
@ -179,7 +189,11 @@ unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
None => return false,
};
let attachment_desc = (&self).attachment_descs().skip(attachment_id).next().unwrap();
let attachment_desc = (&self)
.attachment_descs()
.skip(attachment_id)
.next()
.unwrap();
// FIXME: compare formats depending on the number of components and data type
/*if attachment_desc.format != element.format {
@ -201,7 +215,9 @@ unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
// TODO: once specialization lands, this trait can be specialized for pairs that are known to
// always be compatible
// TODO: maybe this can be unimplemented on some pairs, to provide compile-time checks?
pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc where Other: RenderPassDesc {
pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc
where Other: RenderPassDesc
{
/// Returns `true` if this layout is compatible with the other layout, as defined in the
/// `Render Pass Compatibility` section of the Vulkan specs.
// TODO: return proper error
@ -209,7 +225,8 @@ pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc where Other
}
unsafe impl<A, B: ?Sized> RenderPassCompatible<B> for A
where A: RenderPassDesc, B: RenderPassDesc
where A: RenderPassDesc,
B: RenderPassDesc
{
fn is_compatible_with(&self, other: &B) -> bool {
// FIXME:
@ -237,15 +254,17 @@ pub struct Subpass<L> {
subpass_id: u32,
}
impl<L> Subpass<L> where L: RenderPassDesc {
impl<L> Subpass<L>
where L: RenderPassDesc
{
/// Returns a handle that represents a subpass of a render pass.
#[inline]
pub fn from(render_pass: L, id: u32) -> Option<Subpass<L>> {
if (id as usize) < render_pass.num_subpasses() {
Some(Subpass {
render_pass: render_pass,
subpass_id: id,
})
render_pass: render_pass,
subpass_id: id,
})
} else {
None
@ -255,7 +274,9 @@ impl<L> Subpass<L> where L: RenderPassDesc {
/// Returns the number of color attachments in this subpass.
#[inline]
pub fn num_color_attachments(&self) -> u32 {
self.render_pass.num_color_attachments(self.subpass_id).unwrap()
self.render_pass
.num_color_attachments(self.subpass_id)
.unwrap()
}
/// Returns true if the subpass has a depth attachment or a depth-stencil attachment.
@ -268,7 +289,9 @@ impl<L> Subpass<L> where L: RenderPassDesc {
/// layout is not `DepthStencilReadOnlyOptimal`.
#[inline]
pub fn has_writable_depth(&self) -> bool {
self.render_pass.has_writable_depth(self.subpass_id).unwrap()
self.render_pass
.has_writable_depth(self.subpass_id)
.unwrap()
}
/// Returns true if the subpass has a stencil attachment or a depth-stencil attachment.
@ -281,14 +304,18 @@ impl<L> Subpass<L> where L: RenderPassDesc {
/// layout is not `DepthStencilReadOnlyOptimal`.
#[inline]
pub fn has_writable_stencil(&self) -> bool {
self.render_pass.has_writable_stencil(self.subpass_id).unwrap()
self.render_pass
.has_writable_stencil(self.subpass_id)
.unwrap()
}
/// Returns true if the subpass has any color or depth/stencil attachment.
#[inline]
pub fn has_color_or_depth_stencil_attachment(&self) -> bool {
self.num_color_attachments() >= 1 ||
self.render_pass.has_depth_stencil_attachment(self.subpass_id).unwrap() != (false, false)
self.render_pass
.has_depth_stencil_attachment(self.subpass_id)
.unwrap() != (false, false)
}
/// Returns the number of samples in the color and/or depth/stencil attachments. Returns `None`

View File

@ -21,10 +21,10 @@ use format::FormatDesc;
use format::FormatTy;
use image::Dimensions;
use image::ImageDimensions;
use image::ViewType;
use image::sys::ImageCreationError;
use image::ImageLayout;
use image::ImageUsage;
use image::ViewType;
use image::sys::ImageCreationError;
use image::sys::UnsafeImage;
use image::sys::UnsafeImageView;
use image::traits::ImageAccess;
@ -147,7 +147,7 @@ impl<F> AttachmentImage<F> {
{
let base_usage = ImageUsage {
transient_attachment: true,
.. ImageUsage::none()
..ImageUsage::none()
};
AttachmentImage::new_impl(device, dimensions, format, base_usage, 1)
@ -158,20 +158,22 @@ impl<F> AttachmentImage<F> {
/// > **Note**: You can also use this function and pass `1` for the number of samples if you
/// > want a regular image.
#[inline]
pub fn transient_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
pub fn transient_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32,
format: F)
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
where F: FormatDesc
{
let base_usage = ImageUsage {
transient_attachment: true,
.. ImageUsage::none()
..ImageUsage::none()
};
AttachmentImage::new_impl(device, dimensions, format, base_usage, samples)
}
fn new_impl(device: Arc<Device>, dimensions: [u32; 2], format: F, base_usage: ImageUsage,
samples: u32) -> Result<Arc<AttachmentImage<F>>, ImageCreationError>
samples: u32)
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
where F: FormatDesc
{
// TODO: check dimensions against the max_framebuffer_width/height/layers limits
@ -181,13 +183,13 @@ impl<F> AttachmentImage<F> {
FormatTy::DepthStencil => true,
FormatTy::Stencil => true,
FormatTy::Compressed => panic!(),
_ => false
_ => false,
};
let usage = ImageUsage {
color_attachment: !is_depth,
depth_stencil_attachment: is_depth,
.. base_usage
..base_usage
};
let (image, mem_reqs) = unsafe {
@ -195,40 +197,57 @@ impl<F> AttachmentImage<F> {
width: dimensions[0],
height: dimensions[1],
array_layers: 1,
cubemap_compatible: false
cubemap_compatible: false,
};
try!(UnsafeImage::new(device.clone(), usage, format.format(), dims,
samples, 1, Sharing::Exclusive::<Empty<u32>>, false, false))
UnsafeImage::new(device.clone(),
usage,
format.format(),
dims,
samples,
1,
Sharing::Exclusive::<Empty<u32>>,
false,
false)?
};
let mem_ty = {
let device_local = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Optimal));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Optimal)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
unsafe { try!(image.bind_memory(mem.memory(), mem.offset())); }
unsafe {
image.bind_memory(mem.memory(), mem.offset())?;
}
let view = unsafe {
try!(UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1))
};
let view = unsafe { UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1)? };
Ok(Arc::new(AttachmentImage {
image: image,
view: view,
memory: mem,
format: format,
attachment_layout: if is_depth { ImageLayout::DepthStencilAttachmentOptimal }
else { ImageLayout::ColorAttachmentOptimal },
gpu_lock: AtomicUsize::new(0),
}))
image: image,
view: view,
memory: mem,
format: format,
attachment_layout: if is_depth {
ImageLayout::DepthStencilAttachmentOptimal
} else {
ImageLayout::ColorAttachmentOptimal
},
gpu_lock: AtomicUsize::new(0),
}))
}
}
@ -300,7 +319,7 @@ unsafe impl<P, F, A> ImageContent<P> for Arc<AttachmentImage<F, A>>
{
#[inline]
fn matches_format(&self) -> bool {
true // FIXME:
true // FIXME:
}
}
@ -315,7 +334,10 @@ unsafe impl<F, A> ImageViewAccess for AttachmentImage<F, A>
#[inline]
fn dimensions(&self) -> Dimensions {
let dims = self.image.dimensions();
Dimensions::Dim2d { width: dims.width(), height: dims.height() }
Dimensions::Dim2d {
width: dims.width(),
height: dims.height(),
}
}
#[inline]

View File

@ -7,8 +7,8 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use smallvec::SmallVec;
use std::sync::Arc;
use device::Device;
use device::Queue;
@ -16,10 +16,10 @@ use format::Format;
use format::FormatDesc;
use image::Dimensions;
use image::ImageDimensions;
use image::MipmapsCount;
use image::sys::ImageCreationError;
use image::ImageLayout;
use image::ImageUsage;
use image::MipmapsCount;
use image::sys::ImageCreationError;
use image::sys::UnsafeImage;
use image::sys::UnsafeImageView;
use image::traits::ImageAccess;
@ -37,7 +37,9 @@ use sync::Sharing;
/// but then you must only ever read from it. TODO: clarify because of blit operations
// TODO: type (2D, 3D, array, etc.) as template parameter
#[derive(Debug)]
pub struct ImmutableImage<F, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct ImmutableImage<F, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
image: UnsafeImage,
view: UnsafeImageView,
dimensions: Dimensions,
@ -51,26 +53,35 @@ impl<F> ImmutableImage<F> {
#[inline]
pub fn new<'a, I>(device: Arc<Device>, dimensions: Dimensions, format: F, queue_families: I)
-> Result<Arc<ImmutableImage<F>>, ImageCreationError>
where F: FormatDesc, I: IntoIterator<Item = QueueFamily<'a>>
where F: FormatDesc,
I: IntoIterator<Item = QueueFamily<'a>>
{
ImmutableImage::with_mipmaps(device, dimensions, format, MipmapsCount::One, queue_families)
ImmutableImage::with_mipmaps(device,
dimensions,
format,
MipmapsCount::One,
queue_families)
}
/// Builds a new immutable image with the given number of mipmaps.
pub fn with_mipmaps<'a, I, M>(device: Arc<Device>, dimensions: Dimensions, format: F,
mipmaps: M, queue_families: I)
-> Result<Arc<ImmutableImage<F>>, ImageCreationError>
where F: FormatDesc, I: IntoIterator<Item = QueueFamily<'a>>, M: Into<MipmapsCount>
where F: FormatDesc,
I: IntoIterator<Item = QueueFamily<'a>>,
M: Into<MipmapsCount>
{
let usage = ImageUsage {
transfer_source: true, // for blits
transfer_source: true, // for blits
transfer_dest: true,
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let queue_families = queue_families.into_iter().map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (image, mem_reqs) = unsafe {
let sharing = if queue_families.len() >= 2 {
@ -79,40 +90,60 @@ impl<F> ImmutableImage<F> {
Sharing::Exclusive
};
try!(UnsafeImage::new(device.clone(), usage, format.format(), dimensions.to_image_dimensions(),
1, mipmaps, sharing, false, false))
UnsafeImage::new(device.clone(),
usage,
format.format(),
dimensions.to_image_dimensions(),
1,
mipmaps,
sharing,
false,
false)?
};
let mem_ty = {
let device_local = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Optimal));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Optimal)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
unsafe { try!(image.bind_memory(mem.memory(), mem.offset())); }
unsafe {
image.bind_memory(mem.memory(), mem.offset())?;
}
let view = unsafe {
try!(UnsafeImageView::raw(&image, dimensions.to_view_type(), 0 .. image.mipmap_levels(),
0 .. image.dimensions().array_layers()))
UnsafeImageView::raw(&image,
dimensions.to_view_type(),
0 .. image.mipmap_levels(),
0 .. image.dimensions().array_layers())?
};
Ok(Arc::new(ImmutableImage {
image: image,
view: view,
memory: mem,
dimensions: dimensions,
format: format,
}))
image: image,
view: view,
memory: mem,
dimensions: dimensions,
format: format,
}))
}
}
impl<F, A> ImmutableImage<F, A> where A: MemoryPool {
impl<F, A> ImmutableImage<F, A>
where A: MemoryPool
{
/// Returns the dimensions of the image.
#[inline]
pub fn dimensions(&self) -> Dimensions {
@ -126,7 +157,10 @@ impl<F, A> ImmutableImage<F, A> where A: MemoryPool {
}
}
unsafe impl<F, A> ImageAccess for ImmutableImage<F, A> where F: 'static + Send + Sync, A: MemoryPool {
unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
where F: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn inner(&self) -> &UnsafeImage {
&self.image
@ -134,12 +168,12 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A> where F: 'static + Send +
#[inline]
fn initial_layout_requirement(&self) -> ImageLayout {
ImageLayout::ShaderReadOnlyOptimal // TODO: ?
ImageLayout::ShaderReadOnlyOptimal // TODO: ?
}
#[inline]
fn final_layout_requirement(&self) -> ImageLayout {
ImageLayout::ShaderReadOnlyOptimal // TODO: ?
ImageLayout::ShaderReadOnlyOptimal // TODO: ?
}
#[inline]
@ -149,7 +183,7 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A> where F: 'static + Send +
#[inline]
fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> Result<(), AccessError> {
Ok(()) // FIXME:
Ok(()) // FIXME:
}
#[inline]
@ -164,16 +198,18 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A> where F: 'static + Send +
}
unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
where F: 'static + Send + Sync, A: MemoryPool
where F: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn matches_format(&self) -> bool {
true // FIXME:
true // FIXME:
}
}
unsafe impl<F: 'static, A> ImageViewAccess for ImmutableImage<F, A>
where F: 'static + Send + Sync, A: MemoryPool
where F: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn parent(&self) -> &ImageAccess {

View File

@ -8,7 +8,7 @@
// according to those terms.
//! Images storage (1D, 2D, 3D, arrays, etc.).
//!
//!
//! An *image* is a location in memory whose purpose is to store multi-dimensional data. Its
//! most common usage is to store a 2D array of color pixels (in other words an *image* in the
//! everyday language), but it can also be used to store arbitrary data.
@ -56,11 +56,11 @@ pub use self::traits::ImageAccess;
pub use self::traits::ImageViewAccess;
pub use self::usage::ImageUsage;
pub mod attachment; // TODO: make private
pub mod immutable; // TODO: make private
pub mod attachment; // TODO: make private
pub mod immutable; // TODO: make private
mod layout;
mod storage;
pub mod swapchain; // TODO: make private
pub mod swapchain; // TODO: make private
pub mod sys;
pub mod traits;
mod usage;
@ -144,7 +144,11 @@ pub enum Dimensions {
Dim1d { width: u32 },
Dim1dArray { width: u32, array_layers: u32 },
Dim2d { width: u32, height: u32 },
Dim2dArray { width: u32, height: u32, array_layers: u32 },
Dim2dArray {
width: u32,
height: u32,
array_layers: u32,
},
Dim3d { width: u32, height: u32, depth: u32 },
Cubemap { size: u32 },
CubemapArray { size: u32, array_layers: u32 },
@ -171,7 +175,7 @@ impl Dimensions {
Dimensions::Dim1dArray { .. } => 1,
Dimensions::Dim2d { height, .. } => height,
Dimensions::Dim2dArray { height, .. } => height,
Dimensions::Dim3d { height, .. } => height,
Dimensions::Dim3d { height, .. } => height,
Dimensions::Cubemap { size } => size,
Dimensions::CubemapArray { size, .. } => size,
}
@ -189,7 +193,7 @@ impl Dimensions {
Dimensions::Dim1dArray { .. } => 1,
Dimensions::Dim2d { .. } => 1,
Dimensions::Dim2dArray { .. } => 1,
Dimensions::Dim3d { depth, .. } => depth,
Dimensions::Dim3d { depth, .. } => depth,
Dimensions::Cubemap { .. } => 1,
Dimensions::CubemapArray { .. } => 1,
}
@ -207,7 +211,7 @@ impl Dimensions {
Dimensions::Dim1dArray { array_layers, .. } => array_layers,
Dimensions::Dim2d { .. } => 1,
Dimensions::Dim2dArray { array_layers, .. } => array_layers,
Dimensions::Dim3d { .. } => 1,
Dimensions::Dim3d { .. } => 1,
Dimensions::Cubemap { .. } => 1,
Dimensions::CubemapArray { array_layers, .. } => array_layers,
}
@ -220,7 +224,7 @@ impl Dimensions {
Dimensions::Dim1dArray { array_layers, .. } => array_layers,
Dimensions::Dim2d { .. } => 1,
Dimensions::Dim2dArray { array_layers, .. } => array_layers,
Dimensions::Dim3d { .. } => 1,
Dimensions::Dim3d { .. } => 1,
Dimensions::Cubemap { .. } => 6,
Dimensions::CubemapArray { array_layers, .. } => array_layers * 6,
}
@ -231,29 +235,66 @@ impl Dimensions {
pub fn to_image_dimensions(&self) -> ImageDimensions {
match *self {
Dimensions::Dim1d { width } => {
ImageDimensions::Dim1d { width: width, array_layers: 1 }
ImageDimensions::Dim1d {
width: width,
array_layers: 1,
}
},
Dimensions::Dim1dArray { width, array_layers } => {
ImageDimensions::Dim1d { width: width, array_layers: array_layers }
Dimensions::Dim1dArray {
width,
array_layers,
} => {
ImageDimensions::Dim1d {
width: width,
array_layers: array_layers,
}
},
Dimensions::Dim2d { width, height } => {
ImageDimensions::Dim2d { width: width, height: height, array_layers: 1,
cubemap_compatible: false }
ImageDimensions::Dim2d {
width: width,
height: height,
array_layers: 1,
cubemap_compatible: false,
}
},
Dimensions::Dim2dArray { width, height, array_layers } => {
ImageDimensions::Dim2d { width: width, height: height,
array_layers: array_layers, cubemap_compatible: false }
Dimensions::Dim2dArray {
width,
height,
array_layers,
} => {
ImageDimensions::Dim2d {
width: width,
height: height,
array_layers: array_layers,
cubemap_compatible: false,
}
},
Dimensions::Dim3d { width, height, depth } => {
ImageDimensions::Dim3d { width: width, height: height, depth: depth }
Dimensions::Dim3d {
width,
height,
depth,
} => {
ImageDimensions::Dim3d {
width: width,
height: height,
depth: depth,
}
},
Dimensions::Cubemap { size } => {
ImageDimensions::Dim2d { width: size, height: size, array_layers: 6,
cubemap_compatible: true }
ImageDimensions::Dim2d {
width: size,
height: size,
array_layers: 6,
cubemap_compatible: true,
}
},
Dimensions::CubemapArray { size, array_layers } => {
ImageDimensions::Dim2d { width: size, height: size, array_layers: array_layers * 6,
cubemap_compatible: true }
ImageDimensions::Dim2d {
width: size,
height: size,
array_layers: array_layers * 6,
cubemap_compatible: true,
}
},
}
}
@ -287,8 +328,13 @@ pub enum ViewType {
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ImageDimensions {
Dim1d { width: u32, array_layers: u32 },
Dim2d { width: u32, height: u32, array_layers: u32, cubemap_compatible: bool },
Dim3d { width: u32, height: u32, depth: u32 }
Dim2d {
width: u32,
height: u32,
array_layers: u32,
cubemap_compatible: bool,
},
Dim3d { width: u32, height: u32, depth: u32 },
}
impl ImageDimensions {
@ -297,7 +343,7 @@ impl ImageDimensions {
match *self {
ImageDimensions::Dim1d { width, .. } => width,
ImageDimensions::Dim2d { width, .. } => width,
ImageDimensions::Dim3d { width, .. } => width,
ImageDimensions::Dim3d { width, .. } => width,
}
}
@ -306,7 +352,7 @@ impl ImageDimensions {
match *self {
ImageDimensions::Dim1d { .. } => 1,
ImageDimensions::Dim2d { height, .. } => height,
ImageDimensions::Dim3d { height, .. } => height,
ImageDimensions::Dim3d { height, .. } => height,
}
}
@ -320,7 +366,7 @@ impl ImageDimensions {
match *self {
ImageDimensions::Dim1d { .. } => 1,
ImageDimensions::Dim2d { .. } => 1,
ImageDimensions::Dim3d { depth, .. } => depth,
ImageDimensions::Dim3d { depth, .. } => depth,
}
}
@ -334,7 +380,7 @@ impl ImageDimensions {
match *self {
ImageDimensions::Dim1d { array_layers, .. } => array_layers,
ImageDimensions::Dim2d { array_layers, .. } => array_layers,
ImageDimensions::Dim3d { .. } => 1,
ImageDimensions::Dim3d { .. } => 1,
}
}
}

View File

@ -7,23 +7,23 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::iter::Empty;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use smallvec::SmallVec;
use device::Device;
use device::Queue;
use format::ClearValue;
use format::Format;
use format::FormatDesc;
use format::FormatTy;
use format::Format;
use image::Dimensions;
use image::ImageDimensions;
use image::sys::ImageCreationError;
use image::ImageLayout;
use image::ImageUsage;
use image::sys::ImageCreationError;
use image::sys::UnsafeImage;
use image::sys::UnsafeImageView;
use image::traits::ImageAccess;
@ -41,7 +41,9 @@ use sync::Sharing;
/// General-purpose image in device memory. Can be used for any usage, but will be slower than a
/// specialized image.
#[derive(Debug)]
pub struct StorageImage<F, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct StorageImage<F, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// Inner implementation.
image: UnsafeImage,
@ -69,14 +71,14 @@ impl<F> StorageImage<F> {
pub fn new<'a, I>(device: Arc<Device>, dimensions: Dimensions, format: F, queue_families: I)
-> Result<Arc<StorageImage<F>>, ImageCreationError>
where F: FormatDesc,
I: IntoIterator<Item = QueueFamily<'a>>
I: IntoIterator<Item = QueueFamily<'a>>
{
let is_depth = match format.format().ty() {
FormatTy::Depth => true,
FormatTy::DepthStencil => true,
FormatTy::Stencil => true,
FormatTy::Compressed => panic!(),
_ => false
_ => false,
};
let usage = ImageUsage {
@ -90,8 +92,10 @@ impl<F> StorageImage<F> {
transient_attachment: false,
};
let queue_families = queue_families.into_iter().map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (image, mem_reqs) = unsafe {
let sharing = if queue_families.len() >= 2 {
@ -100,42 +104,62 @@ impl<F> StorageImage<F> {
Sharing::Exclusive
};
try!(UnsafeImage::new(device.clone(), usage, format.format(), dimensions.to_image_dimensions(),
1, 1, Sharing::Exclusive::<Empty<u32>>, false, false))
UnsafeImage::new(device.clone(),
usage,
format.format(),
dimensions.to_image_dimensions(),
1,
1,
Sharing::Exclusive::<Empty<u32>>,
false,
false)?
};
let mem_ty = {
let device_local = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Optimal));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Optimal)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
unsafe { try!(image.bind_memory(mem.memory(), mem.offset())); }
unsafe {
image.bind_memory(mem.memory(), mem.offset())?;
}
let view = unsafe {
try!(UnsafeImageView::raw(&image, dimensions.to_view_type(), 0 .. image.mipmap_levels(),
0 .. image.dimensions().array_layers()))
UnsafeImageView::raw(&image,
dimensions.to_view_type(),
0 .. image.mipmap_levels(),
0 .. image.dimensions().array_layers())?
};
Ok(Arc::new(StorageImage {
image: image,
view: view,
memory: mem,
dimensions: dimensions,
format: format,
queue_families: queue_families,
gpu_lock: AtomicUsize::new(0),
}))
image: image,
view: view,
memory: mem,
dimensions: dimensions,
format: format,
queue_families: queue_families,
gpu_lock: AtomicUsize::new(0),
}))
}
}
impl<F, A> StorageImage<F, A> where A: MemoryPool {
impl<F, A> StorageImage<F, A>
where A: MemoryPool
{
/// Returns the dimensions of the image.
#[inline]
pub fn dimensions(&self) -> Dimensions {
@ -143,7 +167,10 @@ impl<F, A> StorageImage<F, A> where A: MemoryPool {
}
}
unsafe impl<F, A> ImageAccess for StorageImage<F, A> where F: 'static + Send + Sync, A: MemoryPool {
unsafe impl<F, A> ImageAccess for StorageImage<F, A>
where F: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn inner(&self) -> &UnsafeImage {
&self.image
@ -188,7 +215,8 @@ unsafe impl<F, A> ImageAccess for StorageImage<F, A> where F: 'static + Send + S
}
unsafe impl<F, A> ImageClearValue<F::ClearValue> for StorageImage<F, A>
where F: FormatDesc + 'static + Send + Sync, A: MemoryPool
where F: FormatDesc + 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn decode(&self, value: F::ClearValue) -> Option<ClearValue> {
@ -197,16 +225,18 @@ unsafe impl<F, A> ImageClearValue<F::ClearValue> for StorageImage<F, A>
}
unsafe impl<P, F, A> ImageContent<P> for StorageImage<F, A>
where F: 'static + Send + Sync, A: MemoryPool
where F: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn matches_format(&self) -> bool {
true // FIXME:
true // FIXME:
}
}
unsafe impl<F, A> ImageViewAccess for StorageImage<F, A>
where F: 'static + Send + Sync, A: MemoryPool
where F: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn parent(&self) -> &ImageAccess {
@ -258,7 +288,13 @@ mod tests {
#[test]
fn create() {
let (device, queue) = gfx_dev_and_queue!();
let _img = StorageImage::new(device, Dimensions::Dim2d { width: 32, height: 32 },
Format::R8G8B8A8Unorm, Some(queue.family())).unwrap();
let _img = StorageImage::new(device,
Dimensions::Dim2d {
width: 32,
height: 32,
},
Format::R8G8B8A8Unorm,
Some(queue.family()))
.unwrap();
}
}

View File

@ -13,16 +13,16 @@ use device::Queue;
use format::ClearValue;
use format::Format;
use format::FormatDesc;
use image::ImageDimensions;
use image::Dimensions;
use image::ImageDimensions;
use image::ImageLayout;
use image::ViewType;
use image::sys::UnsafeImage;
use image::sys::UnsafeImageView;
use image::traits::ImageAccess;
use image::traits::ImageClearValue;
use image::traits::ImageContent;
use image::traits::ImageViewAccess;
use image::ImageLayout;
use image::sys::UnsafeImage;
use image::sys::UnsafeImageView;
use swapchain::Swapchain;
use sync::AccessError;
@ -53,16 +53,15 @@ impl SwapchainImage {
///
/// This is an internal method that you shouldn't call.
pub unsafe fn from_raw(swapchain: Arc<Swapchain>, id: usize)
-> Result<Arc<SwapchainImage>, OomError>
{
-> Result<Arc<SwapchainImage>, OomError> {
let image = swapchain.raw_image(id).unwrap();
let view = try!(UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1));
let view = UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1)?;
Ok(Arc::new(SwapchainImage {
swapchain: swapchain.clone(),
image_offset: id,
view: view,
}))
swapchain: swapchain.clone(),
image_offset: id,
view: view,
}))
}
/// Returns the dimensions of the image.
@ -122,8 +121,7 @@ unsafe impl ImageAccess for SwapchainImage {
}
}
unsafe impl ImageClearValue<<Format as FormatDesc>::ClearValue> for SwapchainImage
{
unsafe impl ImageClearValue<<Format as FormatDesc>::ClearValue> for SwapchainImage {
#[inline]
fn decode(&self, value: <Format as FormatDesc>::ClearValue) -> Option<ClearValue> {
Some(self.swapchain.format().decode_clear_value(value))
@ -133,7 +131,7 @@ unsafe impl ImageClearValue<<Format as FormatDesc>::ClearValue> for SwapchainIma
unsafe impl<P> ImageContent<P> for SwapchainImage {
#[inline]
fn matches_format(&self) -> bool {
true // FIXME:
true // FIXME:
}
}
@ -146,7 +144,10 @@ unsafe impl ImageViewAccess for SwapchainImage {
#[inline]
fn dimensions(&self) -> Dimensions {
let dims = self.swapchain.dimensions();
Dimensions::Dim2d { width: dims[0], height: dims[1] }
Dimensions::Dim2d {
width: dims[0],
height: dims[1],
}
}
#[inline]

View File

@ -13,13 +13,13 @@
//! other image or image view types of this library, and all custom image or image view types
//! that you create must wrap around the types in this module.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Range;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;
use device::Device;
use format::Format;
@ -83,15 +83,23 @@ impl UnsafeImage {
sharing: Sharing<I>, linear_tiling: bool,
preinitialized_layout: bool)
-> Result<(UnsafeImage, MemoryRequirements), ImageCreationError>
where Mi: Into<MipmapsCount>, I: Iterator<Item = u32>
where Mi: Into<MipmapsCount>,
I: Iterator<Item = u32>
{
let sharing = match sharing {
Sharing::Exclusive => (vk::SHARING_MODE_EXCLUSIVE, SmallVec::<[u32; 8]>::new()),
Sharing::Concurrent(ids) => (vk::SHARING_MODE_CONCURRENT, ids.collect()),
};
UnsafeImage::new_impl(device, usage, format, dimensions, num_samples, mipmaps.into(),
sharing, linear_tiling, preinitialized_layout)
UnsafeImage::new_impl(device,
usage,
format,
dimensions,
num_samples,
mipmaps.into(),
sharing,
linear_tiling,
preinitialized_layout)
}
// Non-templated version to avoid inlining and improve compile times.
@ -99,8 +107,7 @@ impl UnsafeImage {
dimensions: ImageDimensions, num_samples: u32, mipmaps: MipmapsCount,
(sh_mode, sh_indices): (vk::SharingMode, SmallVec<[u32; 8]>),
linear_tiling: bool, preinitialized_layout: bool)
-> Result<(UnsafeImage, MemoryRequirements), ImageCreationError>
{
-> Result<(UnsafeImage, MemoryRequirements), ImageCreationError> {
// TODO: doesn't check that the proper features are enabled
let vk = device.pointers();
@ -132,17 +139,27 @@ impl UnsafeImage {
if usage.color_attachment && (features & vk::FORMAT_FEATURE_COLOR_ATTACHMENT_BIT == 0) {
return Err(ImageCreationError::UnsupportedUsage);
}
if usage.depth_stencil_attachment && (features & vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT == 0) {
if usage.depth_stencil_attachment &&
(features & vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT == 0)
{
return Err(ImageCreationError::UnsupportedUsage);
}
if usage.input_attachment && (features & (vk::FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
if usage.input_attachment &&
(features &
(vk::FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0)
{
return Err(ImageCreationError::UnsupportedUsage);
}
if device.loaded_extensions().khr_maintenance1 {
if usage.transfer_source && (features & vk::FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR == 0) {
if usage.transfer_source &&
(features & vk::FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR == 0)
{
return Err(ImageCreationError::UnsupportedUsage);
}
if usage.transfer_dest && (features & vk::FORMAT_FEATURE_TRANSFER_DST_BIT_KHR == 0) {
if usage.transfer_dest &&
(features & vk::FORMAT_FEATURE_TRANSFER_DST_BIT_KHR == 0)
{
return Err(ImageCreationError::UnsupportedUsage);
}
}
@ -158,7 +175,7 @@ impl UnsafeImage {
color_attachment: false,
depth_stencil_attachment: false,
input_attachment: false,
.. usage.clone()
..usage.clone()
};
if u != ImageUsage::none() {
@ -181,7 +198,11 @@ impl UnsafeImage {
ImageDimensions::Dim2d { width, height, .. } => {
if width < height { width } else { height }
},
ImageDimensions::Dim3d { width, height, depth } => {
ImageDimensions::Dim3d {
width,
height,
depth,
} => {
if width < height {
if depth < width { depth } else { width }
} else {
@ -198,12 +219,14 @@ impl UnsafeImage {
MipmapsCount::Specific(num) => {
if num < 1 {
return Err(ImageCreationError::InvalidMipmapsCount {
obtained: num, valid_range: 1 .. max_mipmaps + 1
});
obtained: num,
valid_range: 1 .. max_mipmaps + 1,
});
} else if num > max_mipmaps {
capabilities_error = Some(ImageCreationError::InvalidMipmapsCount {
obtained: num, valid_range: 1 .. max_mipmaps + 1
});
obtained: num,
valid_range: 1 .. max_mipmaps + 1,
});
}
num
@ -220,61 +243,85 @@ impl UnsafeImage {
return Err(ImageCreationError::UnsupportedSamplesCount { obtained: num_samples });
} else {
let mut supported_samples = 0x7f; // all bits up to VK_SAMPLE_COUNT_64_BIT
let mut supported_samples = 0x7f; // all bits up to VK_SAMPLE_COUNT_64_BIT
if usage.sampled {
match format.ty() {
FormatTy::Float | FormatTy::Compressed => {
supported_samples &= device.physical_device().limits()
.sampled_image_color_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.sampled_image_color_sample_counts();
},
FormatTy::Uint | FormatTy::Sint => {
supported_samples &= device.physical_device().limits()
.sampled_image_integer_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.sampled_image_integer_sample_counts();
},
FormatTy::Depth => {
supported_samples &= device.physical_device().limits()
.sampled_image_depth_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.sampled_image_depth_sample_counts();
},
FormatTy::Stencil => {
supported_samples &= device.physical_device().limits()
.sampled_image_stencil_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.sampled_image_stencil_sample_counts();
},
FormatTy::DepthStencil => {
supported_samples &= device.physical_device().limits()
.sampled_image_depth_sample_counts();
supported_samples &= device.physical_device().limits()
.sampled_image_stencil_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.sampled_image_depth_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.sampled_image_stencil_sample_counts();
},
}
}
if usage.storage {
supported_samples &= device.physical_device().limits()
.storage_image_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.storage_image_sample_counts();
}
if usage.color_attachment || usage.depth_stencil_attachment || usage.input_attachment ||
usage.transient_attachment
if usage.color_attachment || usage.depth_stencil_attachment ||
usage.input_attachment || usage.transient_attachment
{
match format.ty() {
FormatTy::Float | FormatTy::Compressed | FormatTy::Uint | FormatTy::Sint => {
supported_samples &= device.physical_device().limits()
.framebuffer_color_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.framebuffer_color_sample_counts();
},
FormatTy::Depth => {
supported_samples &= device.physical_device().limits()
.framebuffer_depth_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.framebuffer_depth_sample_counts();
},
FormatTy::Stencil => {
supported_samples &= device.physical_device().limits()
.framebuffer_stencil_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.framebuffer_stencil_sample_counts();
},
FormatTy::DepthStencil => {
supported_samples &= device.physical_device().limits()
.framebuffer_depth_sample_counts();
supported_samples &= device.physical_device().limits()
.framebuffer_stencil_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.framebuffer_depth_sample_counts();
supported_samples &= device
.physical_device()
.limits()
.framebuffer_stencil_sample_counts();
},
}
}
@ -295,30 +342,65 @@ impl UnsafeImage {
// Decoding the dimensions.
let (ty, extent, array_layers, flags) = match dimensions {
ImageDimensions::Dim1d { width, array_layers } => {
ImageDimensions::Dim1d {
width,
array_layers,
} => {
if width == 0 || array_layers == 0 {
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
return Err(ImageCreationError::UnsupportedDimensions {
dimensions: dimensions,
});
}
let extent = vk::Extent3D { width: width, height: 1, depth: 1 };
let extent = vk::Extent3D {
width: width,
height: 1,
depth: 1,
};
(vk::IMAGE_TYPE_1D, extent, array_layers, 0)
},
ImageDimensions::Dim2d { width, height, array_layers, cubemap_compatible } => {
ImageDimensions::Dim2d {
width,
height,
array_layers,
cubemap_compatible,
} => {
if width == 0 || height == 0 || array_layers == 0 {
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
return Err(ImageCreationError::UnsupportedDimensions {
dimensions: dimensions,
});
}
if cubemap_compatible && width != height {
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
return Err(ImageCreationError::UnsupportedDimensions {
dimensions: dimensions,
});
}
let extent = vk::Extent3D { width: width, height: height, depth: 1 };
let flags = if cubemap_compatible { vk::IMAGE_CREATE_CUBE_COMPATIBLE_BIT }
else { 0 };
let extent = vk::Extent3D {
width: width,
height: height,
depth: 1,
};
let flags = if cubemap_compatible {
vk::IMAGE_CREATE_CUBE_COMPATIBLE_BIT
} else {
0
};
(vk::IMAGE_TYPE_2D, extent, array_layers, flags)
},
ImageDimensions::Dim3d { width, height, depth } => {
ImageDimensions::Dim3d {
width,
height,
depth,
} => {
if width == 0 || height == 0 || depth == 0 {
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
return Err(ImageCreationError::UnsupportedDimensions {
dimensions: dimensions,
});
}
let extent = vk::Extent3D { width: width, height: height, depth: depth };
let extent = vk::Extent3D {
width: width,
height: height,
depth: depth,
};
(vk::IMAGE_TYPE_3D, extent, 1, 0)
},
};
@ -344,9 +426,10 @@ impl UnsafeImage {
if (flags & vk::IMAGE_CREATE_CUBE_COMPATIBLE_BIT) != 0 {
let limit = device.physical_device().limits().max_image_dimension_cube();
debug_assert_eq!(extent.width, extent.height); // checked above
debug_assert_eq!(extent.width, extent.height); // checked above
if extent.width > limit {
let err = ImageCreationError::UnsupportedDimensions { dimensions: dimensions };
let err =
ImageCreationError::UnsupportedDimensions { dimensions: dimensions };
capabilities_error = Some(err);
}
}
@ -358,7 +441,7 @@ impl UnsafeImage {
capabilities_error = Some(err);
}
},
_ => unreachable!()
_ => unreachable!(),
};
let usage = usage.to_usage_bits();
@ -374,19 +457,26 @@ impl UnsafeImage {
let mut output = mem::uninitialized();
let physical_device = device.physical_device().internal_object();
let r = vk_i.GetPhysicalDeviceImageFormatProperties(physical_device, format as u32, ty,
tiling, usage, 0 /* TODO */,
let r = vk_i.GetPhysicalDeviceImageFormatProperties(physical_device,
format as u32,
ty,
tiling,
usage,
0, /* TODO */
&mut output);
match check_errors(r) {
Ok(_) => (),
Err(Error::FormatNotSupported) => return Err(ImageCreationError::FormatNotSupported),
Err(Error::FormatNotSupported) =>
return Err(ImageCreationError::FormatNotSupported),
Err(err) => return Err(err.into()),
}
if extent.width > output.maxExtent.width || extent.height > output.maxExtent.height ||
extent.depth > output.maxExtent.depth || mipmaps > output.maxMipLevels ||
array_layers > output.maxArrayLayers || (num_samples & output.sampleCounts) == 0
extent.depth > output.maxExtent.depth ||
mipmaps > output.maxMipLevels ||
array_layers > output.maxArrayLayers ||
(num_samples & output.sampleCounts) == 0
{
return Err(capabilities_error);
}
@ -421,8 +511,10 @@ impl UnsafeImage {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateImage(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateImage(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
@ -453,8 +545,7 @@ impl UnsafeImage {
/// This function is for example used at the swapchain's initialization.
pub unsafe fn from_raw(device: Arc<Device>, handle: u64, usage: u32, format: Format,
dimensions: ImageDimensions, samples: u32, mipmaps: u32)
-> UnsafeImage
{
-> UnsafeImage {
let vk_i = device.instance().pointers();
let physical_device = device.physical_device().internal_object();
@ -472,27 +563,28 @@ impl UnsafeImage {
samples: samples,
mipmaps: mipmaps,
format_features: output.optimalTilingFeatures,
needs_destruction: false, // TODO: pass as parameter
needs_destruction: false, // TODO: pass as parameter
}
}
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize)
-> Result<(), OomError>
{
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize) -> Result<(), OomError> {
let vk = self.device.pointers();
// We check for correctness in debug mode.
debug_assert!({
let mut mem_reqs = mem::uninitialized();
vk.GetImageMemoryRequirements(self.device.internal_object(), self.image,
&mut mem_reqs);
mem_reqs.size <= (memory.size() - offset) as u64 &&
(offset as u64 % mem_reqs.alignment) == 0 &&
mem_reqs.memoryTypeBits & (1 << memory.memory_type().id()) != 0
});
let mut mem_reqs = mem::uninitialized();
vk.GetImageMemoryRequirements(self.device.internal_object(),
self.image,
&mut mem_reqs);
mem_reqs.size <= (memory.size() - offset) as u64 &&
(offset as u64 % mem_reqs.alignment) == 0 &&
mem_reqs.memoryTypeBits & (1 << memory.memory_type().id()) != 0
});
try!(check_errors(vk.BindImageMemory(self.device.internal_object(), self.image,
memory.internal_object(), offset as vk::DeviceSize)));
check_errors(vk.BindImageMemory(self.device.internal_object(),
self.image,
memory.internal_object(),
offset as vk::DeviceSize))?;
Ok(())
}
@ -599,7 +691,9 @@ impl UnsafeImage {
};
let mut out = mem::uninitialized();
vk.GetImageSubresourceLayout(self.device.internal_object(), self.image, &subresource,
vk.GetImageSubresourceLayout(self.device.internal_object(),
self.image,
&subresource,
&mut out);
LinearLayout {
@ -706,7 +800,10 @@ pub enum ImageCreationError {
/// Not enough memory.
OomError(OomError),
/// A wrong number of mipmaps was provided.
InvalidMipmapsCount { obtained: u32, valid_range: Range<u32> },
InvalidMipmapsCount {
obtained: u32,
valid_range: Range<u32>,
},
/// The requeted number of samples is not supported, or is 0.
UnsupportedSamplesCount { obtained: u32 },
/// The dimensions are too large, or one of the dimensions is 0.
@ -724,16 +821,17 @@ impl error::Error for ImageCreationError {
fn description(&self) -> &str {
match *self {
ImageCreationError::OomError(_) => "not enough memory available",
ImageCreationError::InvalidMipmapsCount { .. } => "a wrong number of mipmaps was \
provided",
ImageCreationError::UnsupportedSamplesCount { .. } => "the requeted number of samples \
is not supported, or is 0",
ImageCreationError::UnsupportedDimensions { .. } => "the dimensions are too large, or \
one of the dimensions is 0",
ImageCreationError::FormatNotSupported => "the requested format is not supported by \
the Vulkan implementation",
ImageCreationError::UnsupportedUsage => "the format is supported, but at least one \
of the requested usages is not supported",
ImageCreationError::InvalidMipmapsCount { .. } =>
"a wrong number of mipmaps was provided",
ImageCreationError::UnsupportedSamplesCount { .. } =>
"the requeted number of samples is not supported, or is 0",
ImageCreationError::UnsupportedDimensions { .. } =>
"the dimensions are too large, or one of the dimensions is 0",
ImageCreationError::FormatNotSupported =>
"the requested format is not supported by the Vulkan implementation",
ImageCreationError::UnsupportedUsage =>
"the format is supported, but at least one of the requested usages is not \
supported",
ImageCreationError::ShaderStorageImageMultisampleFeatureNotEnabled => {
"the `shader_storage_image_multisample` feature must be enabled to create such \
an image"
@ -745,7 +843,7 @@ impl error::Error for ImageCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
ImageCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -770,7 +868,7 @@ impl From<Error> for ImageCreationError {
match err {
err @ Error::OutOfHostMemory => ImageCreationError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => ImageCreationError::OomError(OomError::from(err)),
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -810,8 +908,8 @@ pub struct UnsafeImageView {
impl UnsafeImageView {
/// See the docs of new().
pub unsafe fn raw(image: &UnsafeImage, ty: ViewType, mipmap_levels: Range<u32>,
array_layers: Range<u32>) -> Result<UnsafeImageView, OomError>
{
array_layers: Range<u32>)
-> Result<UnsafeImageView, OomError> {
let vk = image.device.pointers();
assert!(mipmap_levels.end > mipmap_levels.start);
@ -830,30 +928,39 @@ impl UnsafeImageView {
let view_type = match (image.dimensions(), ty, array_layers.end - array_layers.start) {
(ImageDimensions::Dim1d { .. }, ViewType::Dim1d, 1) => vk::IMAGE_VIEW_TYPE_1D,
(ImageDimensions::Dim1d { .. }, ViewType::Dim1dArray, _) => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
(ImageDimensions::Dim1d { .. }, ViewType::Dim1dArray, _) =>
vk::IMAGE_VIEW_TYPE_1D_ARRAY,
(ImageDimensions::Dim2d { .. }, ViewType::Dim2d, 1) => vk::IMAGE_VIEW_TYPE_2D,
(ImageDimensions::Dim2d { .. }, ViewType::Dim2dArray, _) => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::Cubemap, n) if cubemap_compatible => {
(ImageDimensions::Dim2d { .. }, ViewType::Dim2dArray, _) =>
vk::IMAGE_VIEW_TYPE_2D_ARRAY,
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::Cubemap, n)
if cubemap_compatible => {
assert_eq!(n, 6);
vk::IMAGE_VIEW_TYPE_CUBE
},
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::CubemapArray, n) if cubemap_compatible => {
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::CubemapArray, n)
if cubemap_compatible => {
assert_eq!(n % 6, 0);
vk::IMAGE_VIEW_TYPE_CUBE_ARRAY
},
(ImageDimensions::Dim3d { .. }, ViewType::Dim3d, _) => vk::IMAGE_VIEW_TYPE_3D,
_ => panic!()
_ => panic!(),
};
let view = {
let infos = vk::ImageViewCreateInfo {
sType: vk::STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
image: image.internal_object(),
viewType: view_type,
format: image.format as u32,
components: vk::ComponentMapping { r: 0, g: 0, b: 0, a: 0 }, // FIXME:
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
@ -864,18 +971,20 @@ impl UnsafeImageView {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateImageView(image.device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateImageView(image.device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(UnsafeImageView {
view: view,
device: image.device.clone(),
usage: image.usage,
identity_swizzle: true, // FIXME:
format: image.format,
})
view: view,
device: image.device.clone(),
usage: image.usage,
identity_swizzle: true, // FIXME:
format: image.format,
})
}
/// Creates a new view from an image.
@ -895,8 +1004,8 @@ impl UnsafeImageView {
///
#[inline]
pub unsafe fn new(image: &UnsafeImage, ty: ViewType, mipmap_levels: Range<u32>,
array_layers: Range<u32>) -> UnsafeImageView
{
array_layers: Range<u32>)
-> UnsafeImageView {
UnsafeImageView::raw(image, ty, mipmap_levels, array_layers).unwrap()
}
@ -978,11 +1087,11 @@ mod tests {
use std::u32;
use super::ImageCreationError;
use super::UnsafeImage;
use super::ImageUsage;
use super::UnsafeImage;
use image::ImageDimensions;
use format::Format;
use image::ImageDimensions;
use sync::Sharing;
#[test]
@ -991,14 +1100,24 @@ mod tests {
let usage = ImageUsage {
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let (_img, _) = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
}.unwrap();
}
@ -1009,14 +1128,24 @@ mod tests {
let usage = ImageUsage {
transient_attachment: true,
color_attachment: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let (_img, _) = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
}.unwrap();
}
@ -1026,19 +1155,29 @@ mod tests {
let usage = ImageUsage {
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 0, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
0,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::UnsupportedSamplesCount { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
@ -1048,19 +1187,29 @@ mod tests {
let usage = ImageUsage {
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 5, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
5,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::UnsupportedSamplesCount { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
@ -1070,45 +1219,68 @@ mod tests {
let usage = ImageUsage {
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 0,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
0,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::InvalidMipmapsCount { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
#[test]
#[ignore] // TODO: AMD card seems to support a u32::MAX number of mipmaps
#[ignore] // TODO: AMD card seems to support a u32::MAX number of mipmaps
fn mipmaps_too_high() {
let (device, _) = gfx_dev_and_queue!();
let usage = ImageUsage {
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, u32::MAX,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
u32::MAX,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::InvalidMipmapsCount { obtained, valid_range }) => {
Err(ImageCreationError::InvalidMipmapsCount {
obtained,
valid_range,
}) => {
assert_eq!(obtained, u32::MAX);
assert_eq!(valid_range.start, 1);
},
_ => panic!()
_ => panic!(),
};
}
@ -1118,20 +1290,30 @@ mod tests {
let usage = ImageUsage {
storage: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 2, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
2,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::ShaderStorageImageMultisampleFeatureNotEnabled) => (),
Err(ImageCreationError::UnsupportedSamplesCount { .. }) => (), // unlikely but possible
_ => panic!()
_ => panic!(),
};
}
@ -1141,20 +1323,30 @@ mod tests {
let usage = ImageUsage {
color_attachment: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::ASTC_5x4UnormBlock,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, u32::MAX,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::ASTC_5x4UnormBlock,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
u32::MAX,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::FormatNotSupported) => (),
Err(ImageCreationError::UnsupportedUsage) => (),
_ => panic!()
_ => panic!(),
};
}
@ -1165,19 +1357,29 @@ mod tests {
let usage = ImageUsage {
transient_attachment: true,
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::UnsupportedUsage) => (),
_ => panic!()
_ => panic!(),
};
}
@ -1187,19 +1389,29 @@ mod tests {
let usage = ImageUsage {
sampled: true,
.. ImageUsage::none()
..ImageUsage::none()
};
let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 64, array_layers: 1,
cubemap_compatible: true }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 64,
array_layers: 1,
cubemap_compatible: true,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};
match res {
Err(ImageCreationError::UnsupportedDimensions { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
}

View File

@ -11,12 +11,12 @@ use buffer::BufferAccess;
use device::Queue;
use format::ClearValue;
use format::Format;
use format::PossibleFloatFormatDesc;
use format::PossibleUintFormatDesc;
use format::PossibleSintFormatDesc;
use format::PossibleDepthFormatDesc;
use format::PossibleStencilFormatDesc;
use format::PossibleDepthStencilFormatDesc;
use format::PossibleFloatFormatDesc;
use format::PossibleSintFormatDesc;
use format::PossibleStencilFormatDesc;
use format::PossibleUintFormatDesc;
use image::Dimensions;
use image::ImageDimensions;
use image::ImageLayout;
@ -47,7 +47,7 @@ pub unsafe trait ImageAccess {
}
/// Returns true if the image has a depth component. In other words, if it is a depth or a
/// depth-stencil format.
/// depth-stencil format.
#[inline]
fn has_depth(&self) -> bool {
let format = self.format();
@ -55,7 +55,7 @@ pub unsafe trait ImageAccess {
}
/// Returns true if the image has a stencil component. In other words, if it is a stencil or a
/// depth-stencil format.
/// depth-stencil format.
#[inline]
fn has_stencil(&self) -> bool {
let format = self.format();
@ -117,10 +117,10 @@ pub unsafe trait ImageAccess {
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
fn conflicts_buffer(&self, self_first_layer: u32, self_num_layers: u32, self_first_mipmap: u32,
self_num_mipmaps: u32, other: &BufferAccess, other_offset: usize,
other_size: usize) -> bool
{
fn conflicts_buffer(&self, self_first_layer: u32, self_num_layers: u32,
self_first_mipmap: u32, self_num_mipmaps: u32, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
// TODO: should we really provide a default implementation?
false
}
@ -132,11 +132,11 @@ pub unsafe trait ImageAccess {
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
fn conflicts_image(&self, self_first_layer: u32, self_num_layers: u32, self_first_mipmap: u32,
self_num_mipmaps: u32, other: &ImageAccess,
fn conflicts_image(&self, self_first_layer: u32, self_num_layers: u32,
self_first_mipmap: u32, self_num_mipmaps: u32, other: &ImageAccess,
other_first_layer: u32, other_num_layers: u32, other_first_mipmap: u32,
other_num_mipmaps: u32) -> bool
{
other_num_mipmaps: u32)
-> bool {
// TODO: should we really provide a default implementation?
// TODO: debug asserts to check for ranges
@ -166,15 +166,27 @@ pub unsafe trait ImageAccess {
/// Shortcut for `conflicts_buffer` that compares the whole buffer to another.
#[inline]
fn conflicts_buffer_all(&self, other: &BufferAccess) -> bool {
self.conflicts_buffer(0, self.dimensions().array_layers(), 0, self.mipmap_levels(),
other, 0, other.size())
self.conflicts_buffer(0,
self.dimensions().array_layers(),
0,
self.mipmap_levels(),
other,
0,
other.size())
}
/// Shortcut for `conflicts_image` that compares the whole buffer to a whole image.
#[inline]
fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
self.conflicts_image(0, self.dimensions().array_layers(), 0, self.mipmap_levels(),
other, 0, other.dimensions().array_layers(), 0, other.mipmap_levels())
self.conflicts_image(0,
self.dimensions().array_layers(),
0,
self.mipmap_levels(),
other,
0,
other.dimensions().array_layers(),
0,
other.mipmap_levels())
}
/// Shortcut for `conflict_key` that grabs the key of the whole buffer.
@ -215,7 +227,10 @@ pub unsafe trait ImageAccess {
unsafe fn unlock(&self);
}
unsafe impl<T> ImageAccess for T where T: SafeDeref, T::Target: ImageAccess {
unsafe impl<T> ImageAccess for T
where T: SafeDeref,
T::Target: ImageAccess
{
#[inline]
fn inner(&self) -> &UnsafeImage {
(**self).inner()
@ -233,8 +248,7 @@ unsafe impl<T> ImageAccess for T where T: SafeDeref, T::Target: ImageAccess {
#[inline]
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64
{
-> u64 {
(**self).conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
}
@ -286,9 +300,9 @@ unsafe impl<I> ImageAccess for ImageAccessFromUndefinedLayout<I>
#[inline]
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64
{
self.image.conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
-> u64 {
self.image
.conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
}
#[inline]
@ -360,12 +374,17 @@ pub unsafe trait ImageViewAccess {
/// This method should check whether the sampler's configuration can be used with the format
/// of the view.
// TODO: return a Result
fn can_be_sampled(&self, sampler: &Sampler) -> bool { true /* FIXME */ }
fn can_be_sampled(&self, sampler: &Sampler) -> bool {
true /* FIXME */
}
//fn usable_as_render_pass_attachment(&self, ???) -> Result<(), ???>;
}
unsafe impl<T> ImageViewAccess for T where T: SafeDeref, T::Target: ImageViewAccess {
unsafe impl<T> ImageViewAccess for T
where T: SafeDeref,
T::Target: ImageViewAccess
{
#[inline]
fn parent(&self) -> &ImageAccess {
(**self).parent()

View File

@ -98,14 +98,30 @@ impl ImageUsage {
#[inline]
pub fn to_usage_bits(&self) -> vk::ImageUsageFlagBits {
let mut result = 0;
if self.transfer_source { result |= vk::IMAGE_USAGE_TRANSFER_SRC_BIT; }
if self.transfer_dest { result |= vk::IMAGE_USAGE_TRANSFER_DST_BIT; }
if self.sampled { result |= vk::IMAGE_USAGE_SAMPLED_BIT; }
if self.storage { result |= vk::IMAGE_USAGE_STORAGE_BIT; }
if self.color_attachment { result |= vk::IMAGE_USAGE_COLOR_ATTACHMENT_BIT; }
if self.depth_stencil_attachment { result |= vk::IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; }
if self.transient_attachment { result |= vk::IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; }
if self.input_attachment { result |= vk::IMAGE_USAGE_INPUT_ATTACHMENT_BIT; }
if self.transfer_source {
result |= vk::IMAGE_USAGE_TRANSFER_SRC_BIT;
}
if self.transfer_dest {
result |= vk::IMAGE_USAGE_TRANSFER_DST_BIT;
}
if self.sampled {
result |= vk::IMAGE_USAGE_SAMPLED_BIT;
}
if self.storage {
result |= vk::IMAGE_USAGE_STORAGE_BIT;
}
if self.color_attachment {
result |= vk::IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
if self.depth_stencil_attachment {
result |= vk::IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
if self.transient_attachment {
result |= vk::IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
}
if self.input_attachment {
result |= vk::IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
}
result
}

View File

@ -24,7 +24,7 @@
//! # use std::sync::Arc;
//! # let instance: Arc<Instance> = return;
//! use vulkano::instance::debug::DebugCallback;
//!
//!
//! let _callback = DebugCallback::errors_and_warnings(&instance, |msg| {
//! println!("Debug callback: {:?}", msg.description);
//! }).ok();
@ -34,23 +34,23 @@
//!
//! Note that you must keep the `_callback` object alive for as long as you want your callback to
//! be callable. If you don't store the return value of `DebugCallback`'s constructor in a
//! variable, it will be immediately destroyed and your callback will not work.
//! variable, it will be immediately destroyed and your callback will not work.
//!
use std::error;
use std::ffi::CStr;
use std::fmt;
use std::mem;
use std::os::raw::{c_void, c_char};
use std::os::raw::{c_char, c_void};
use std::panic;
use std::ptr;
use std::sync::Arc;
use instance::Instance;
use check_errors;
use Error;
use VulkanObject;
use check_errors;
use vk;
/// Registration of a callback called by validation layers.
@ -80,27 +80,27 @@ impl DebugCallback {
// that can't be casted to a `*const c_void`.
let user_callback = Box::new(Box::new(user_callback) as Box<_>);
extern "system" fn callback(ty: vk::DebugReportFlagsEXT,
_: vk::DebugReportObjectTypeEXT, _: u64, _: usize,
_: i32, layer_prefix: *const c_char,
description: *const c_char, user_data: *mut c_void) -> u32
{
extern "system" fn callback(ty: vk::DebugReportFlagsEXT, _: vk::DebugReportObjectTypeEXT,
_: u64, _: usize, _: i32, layer_prefix: *const c_char,
description: *const c_char, user_data: *mut c_void)
-> u32 {
unsafe {
let user_callback = user_data as *mut Box<Fn()> as *const _;
let user_callback: &Box<Fn(&Message)> = &*user_callback;
let layer_prefix = CStr::from_ptr(layer_prefix).to_str()
.expect("debug callback message \
not utf-8");
let description = CStr::from_ptr(description).to_str()
.expect("debug callback message \
not utf-8");
let layer_prefix = CStr::from_ptr(layer_prefix)
.to_str()
.expect("debug callback message not utf-8");
let description = CStr::from_ptr(description)
.to_str()
.expect("debug callback message not utf-8");
let message = Message {
ty: MessageTypes {
information: (ty & vk::DEBUG_REPORT_INFORMATION_BIT_EXT) != 0,
warning: (ty & vk::DEBUG_REPORT_WARNING_BIT_EXT) != 0,
performance_warning: (ty & vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) != 0,
performance_warning: (ty & vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) !=
0,
error: (ty & vk::DEBUG_REPORT_ERROR_BIT_EXT) != 0,
debug: (ty & vk::DEBUG_REPORT_DEBUG_BIT_EXT) != 0,
},
@ -111,8 +111,8 @@ impl DebugCallback {
// Since we box the closure, the type system doesn't detect that the `UnwindSafe`
// bound is enforced. Therefore we enforce it manually.
let _ = panic::catch_unwind(panic::AssertUnwindSafe(move || {
user_callback(&message);
}));
user_callback(&message);
}));
vk::FALSE
}
@ -120,11 +120,21 @@ impl DebugCallback {
let flags = {
let mut flags = 0;
if messages.information { flags |= vk::DEBUG_REPORT_INFORMATION_BIT_EXT; }
if messages.warning { flags |= vk::DEBUG_REPORT_WARNING_BIT_EXT; }
if messages.performance_warning { flags |= vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; }
if messages.error { flags |= vk::DEBUG_REPORT_ERROR_BIT_EXT; }
if messages.debug { flags |= vk::DEBUG_REPORT_DEBUG_BIT_EXT; }
if messages.information {
flags |= vk::DEBUG_REPORT_INFORMATION_BIT_EXT;
}
if messages.warning {
flags |= vk::DEBUG_REPORT_WARNING_BIT_EXT;
}
if messages.performance_warning {
flags |= vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
}
if messages.error {
flags |= vk::DEBUG_REPORT_ERROR_BIT_EXT;
}
if messages.debug {
flags |= vk::DEBUG_REPORT_DEBUG_BIT_EXT;
}
flags
};
@ -140,16 +150,18 @@ impl DebugCallback {
let debug_report_callback = unsafe {
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateDebugReportCallbackEXT(instance.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateDebugReportCallbackEXT(instance.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(DebugCallback {
instance: instance.clone(),
debug_report_callback: debug_report_callback,
user_callback: user_callback,
})
instance: instance.clone(),
debug_report_callback: debug_report_callback,
user_callback: user_callback,
})
}
/// Initializes a debug callback with errors and warnings.
@ -170,7 +182,8 @@ impl Drop for DebugCallback {
unsafe {
let vk = self.instance.pointers();
vk.DestroyDebugReportCallbackEXT(self.instance.internal_object(),
self.debug_report_callback, ptr::null());
self.debug_report_callback,
ptr::null());
}
}
}
@ -206,7 +219,7 @@ impl MessageTypes {
pub fn errors() -> MessageTypes {
MessageTypes {
error: true,
.. MessageTypes::none()
..MessageTypes::none()
}
}
@ -218,7 +231,7 @@ impl MessageTypes {
error: true,
warning: true,
performance_warning: true,
.. MessageTypes::none()
..MessageTypes::none()
}
}
@ -246,8 +259,8 @@ impl error::Error for DebugCallbackCreationError {
#[inline]
fn description(&self) -> &str {
match *self {
DebugCallbackCreationError::MissingExtension => "the `EXT_debug_report` extension was \
not enabled",
DebugCallbackCreationError::MissingExtension =>
"the `EXT_debug_report` extension was not enabled",
}
}
}

View File

@ -7,21 +7,21 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::collections::HashSet;
use std::error;
use std::ffi::{CString, CStr};
use std::ffi::{CStr, CString};
use std::fmt;
use std::ptr;
use std::str;
use std::collections::HashSet;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use instance::PhysicalDevice;
use instance::loader;
use instance::loader::LoadingError;
use vk;
use check_errors;
macro_rules! extensions {
($sname:ident, $rawname:ident, $($ext:ident => $s:expr,)*) => (
@ -402,7 +402,7 @@ impl From<Error> for SupportedExtensionsError {
err @ Error::OutOfDeviceMemory => {
SupportedExtensionsError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -415,8 +415,8 @@ pub struct Unbuildable(());
#[cfg(test)]
mod tests {
use instance::{InstanceExtensions, RawInstanceExtensions};
use instance::{DeviceExtensions, RawDeviceExtensions};
use instance::{InstanceExtensions, RawInstanceExtensions};
#[test]
fn empty_extensions() {

View File

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use smallvec::SmallVec;
use std::borrow::Cow;
use std::error;
use std::ffi::CStr;
@ -16,19 +17,18 @@ use std::mem;
use std::ptr;
use std::slice;
use std::sync::Arc;
use smallvec::SmallVec;
use instance::loader;
use instance::loader::LoadingError;
use check_errors;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use instance::loader;
use instance::loader::LoadingError;
use vk;
use features::Features;
use version::Version;
use instance::{InstanceExtensions, RawInstanceExtensions};
use version::Version;
/// An instance of a Vulkan context. This is the main object that should be created by an
/// application before everything else.
@ -114,36 +114,41 @@ impl Instance {
// TODO: add a test for these ^
// TODO: if no allocator is specified by the user, use Rust's allocator instead of leaving
// the choice to Vulkan
pub fn new<'a, L, Ext>(app_infos: Option<&ApplicationInfo>, extensions: Ext,
layers: L) -> Result<Arc<Instance>, InstanceCreationError>
pub fn new<'a, L, Ext>(app_infos: Option<&ApplicationInfo>, extensions: Ext, layers: L)
-> Result<Arc<Instance>, InstanceCreationError>
where L: IntoIterator<Item = &'a &'a str>,
Ext: Into<RawInstanceExtensions>,
Ext: Into<RawInstanceExtensions>
{
let layers = layers.into_iter().map(|&layer| {
CString::new(layer).unwrap()
}).collect::<SmallVec<[_; 16]>>();
let layers = layers
.into_iter()
.map(|&layer| CString::new(layer).unwrap())
.collect::<SmallVec<[_; 16]>>();
Instance::new_inner(app_infos, extensions.into(), layers)
}
fn new_inner(app_infos: Option<&ApplicationInfo>, extensions: RawInstanceExtensions,
layers: SmallVec<[CString; 16]>) -> Result<Arc<Instance>, InstanceCreationError>
{
layers: SmallVec<[CString; 16]>)
-> Result<Arc<Instance>, InstanceCreationError> {
// TODO: For now there are still buggy drivers that will segfault if you don't pass any
// appinfos. Therefore for now we ensure that it can't be `None`.
let def = Default::default();
let app_infos = match app_infos {
Some(a) => Some(a),
None => Some(&def)
None => Some(&def),
};
// Building the CStrings from the `str`s within `app_infos`.
// They need to be created ahead of time, since we pass pointers to them.
let app_infos_strings = if let Some(app_infos) = app_infos {
Some((
app_infos.application_name.clone().map(|n| CString::new(n.as_bytes().to_owned()).unwrap()),
app_infos.engine_name.clone().map(|n| CString::new(n.as_bytes().to_owned()).unwrap())
))
Some((app_infos
.application_name
.clone()
.map(|n| CString::new(n.as_bytes().to_owned()).unwrap()),
app_infos
.engine_name
.clone()
.map(|n| CString::new(n.as_bytes().to_owned()).unwrap())))
} else {
None
};
@ -153,11 +158,33 @@ impl Instance {
Some(vk::ApplicationInfo {
sType: vk::STRUCTURE_TYPE_APPLICATION_INFO,
pNext: ptr::null(),
pApplicationName: app_infos_strings.as_ref().unwrap().0.as_ref().map(|s| s.as_ptr()).unwrap_or(ptr::null()),
applicationVersion: app_infos.application_version.map(|v| v.into_vulkan_version()).unwrap_or(0),
pEngineName: app_infos_strings.as_ref().unwrap().1.as_ref().map(|s| s.as_ptr()).unwrap_or(ptr::null()),
engineVersion: app_infos.engine_version.map(|v| v.into_vulkan_version()).unwrap_or(0),
apiVersion: Version { major: 1, minor: 0, patch: 0 }.into_vulkan_version(), // TODO:
pApplicationName: app_infos_strings
.as_ref()
.unwrap()
.0
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
applicationVersion: app_infos
.application_version
.map(|v| v.into_vulkan_version())
.unwrap_or(0),
pEngineName: app_infos_strings
.as_ref()
.unwrap()
.1
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
engineVersion: app_infos
.engine_version
.map(|v| v.into_vulkan_version())
.unwrap_or(0),
apiVersion: Version {
major: 1,
minor: 0,
patch: 0,
}.into_vulkan_version(), // TODO:
})
} else {
@ -165,15 +192,17 @@ impl Instance {
};
// FIXME: check whether each layer is supported
let layers_ptr = layers.iter().map(|layer| {
layer.as_ptr()
}).collect::<SmallVec<[_; 16]>>();
let layers_ptr = layers
.iter()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 16]>>();
let extensions_list = extensions.iter().map(|extension| {
extension.as_ptr()
}).collect::<SmallVec<[_; 32]>>();
let extensions_list = extensions
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 32]>>();
let entry_points = try!(loader::entry_points());
let entry_points = loader::entry_points()?;
// Creating the Vulkan instance.
let instance = unsafe {
@ -193,26 +222,26 @@ impl Instance {
ppEnabledExtensionNames: extensions_list.as_ptr(),
};
try!(check_errors(entry_points.CreateInstance(&infos, ptr::null(), &mut output)));
check_errors(entry_points.CreateInstance(&infos, ptr::null(), &mut output))?;
output
};
// Loading the function pointers of the newly-created instance.
let vk = {
let f = loader::static_functions().unwrap(); // TODO: return proper error
let f = loader::static_functions().unwrap(); // TODO: return proper error
vk::InstancePointers::load(|name| unsafe {
mem::transmute(f.GetInstanceProcAddr(instance, name.as_ptr()))
})
mem::transmute(f.GetInstanceProcAddr(instance,
name.as_ptr()))
})
};
// Enumerating all physical devices.
let physical_devices: Vec<vk::PhysicalDevice> = unsafe {
let mut num = 0;
try!(check_errors(vk.EnumeratePhysicalDevices(instance, &mut num, ptr::null_mut())));
check_errors(vk.EnumeratePhysicalDevices(instance, &mut num, ptr::null_mut()))?;
let mut devices = Vec::with_capacity(num as usize);
try!(check_errors(vk.EnumeratePhysicalDevices(instance, &mut num,
devices.as_mut_ptr())));
check_errors(vk.EnumeratePhysicalDevices(instance, &mut num, devices.as_mut_ptr()))?;
devices.set_len(num as usize);
devices
};
@ -229,13 +258,13 @@ impl Instance {
};
Ok(Arc::new(Instance {
instance: instance,
//alloc: None,
physical_devices: physical_devices,
vk: vk,
extensions: extensions,
layers: layers,
}))
instance: instance,
//alloc: None,
physical_devices: physical_devices,
vk: vk,
extensions: extensions,
layers: layers,
}))
}
/// Initialize all physical devices
@ -255,8 +284,7 @@ impl Instance {
vk.GetPhysicalDeviceQueueFamilyProperties(device, &mut num, ptr::null_mut());
let mut families = Vec::with_capacity(num as usize);
vk.GetPhysicalDeviceQueueFamilyProperties(device, &mut num,
families.as_mut_ptr());
vk.GetPhysicalDeviceQueueFamilyProperties(device, &mut num, families.as_mut_ptr());
families.set_len(num as usize);
families
};
@ -274,20 +302,22 @@ impl Instance {
};
output.push(PhysicalDeviceInfos {
device: device,
properties: properties,
memory: memory,
queue_families: queue_families,
available_features: Features::from(available_features),
});
device: device,
properties: properties,
memory: memory,
queue_families: queue_families,
available_features: Features::from(available_features),
});
}
output
}
/// Initialize all physical devices, but use VK_KHR_get_physical_device_properties2
/// TODO: Query extension-specific physical device properties, once a new instance extension is supported.
fn init_physical_devices2(vk: &vk::InstancePointers, physical_devices: Vec<vk::PhysicalDevice>,
extensions: &InstanceExtensions) -> Vec<PhysicalDeviceInfos> {
fn init_physical_devices2(vk: &vk::InstancePointers,
physical_devices: Vec<vk::PhysicalDevice>,
extensions: &InstanceExtensions)
-> Vec<PhysicalDeviceInfos> {
let mut output = Vec::with_capacity(physical_devices.len());
for device in physical_devices.into_iter() {
@ -306,17 +336,23 @@ impl Instance {
let mut num = 0;
vk.GetPhysicalDeviceQueueFamilyProperties2KHR(device, &mut num, ptr::null_mut());
let mut families = (0 .. num).map(|_| {
vk::QueueFamilyProperties2KHR {
sType: vk::STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR,
pNext: ptr::null_mut(),
queueFamilyProperties: mem::uninitialized(),
}
}).collect::<Vec<_>>();
let mut families = (0 .. num)
.map(|_| {
vk::QueueFamilyProperties2KHR {
sType: vk::STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR,
pNext: ptr::null_mut(),
queueFamilyProperties: mem::uninitialized(),
}
})
.collect::<Vec<_>>();
vk.GetPhysicalDeviceQueueFamilyProperties2KHR(device, &mut num,
vk.GetPhysicalDeviceQueueFamilyProperties2KHR(device,
&mut num,
families.as_mut_ptr());
families.into_iter().map(|family| family.queueFamilyProperties).collect()
families
.into_iter()
.map(|family| family.queueFamilyProperties)
.collect()
};
let memory: vk::PhysicalDeviceMemoryProperties = unsafe {
@ -340,12 +376,12 @@ impl Instance {
};
output.push(PhysicalDeviceInfos {
device: device,
properties: properties,
memory: memory,
queue_families: queue_families,
available_features: Features::from(available_features),
});
device: device,
properties: properties,
memory: memory,
queue_families: queue_families,
available_features: Features::from(available_features),
});
}
output
}
@ -498,7 +534,7 @@ impl error::Error for InstanceCreationError {
match *self {
InstanceCreationError::LoadingError(ref err) => Some(err),
InstanceCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -534,7 +570,7 @@ impl From<Error> for InstanceCreationError {
Error::LayerNotPresent => InstanceCreationError::LayerNotPresent,
Error::ExtensionNotPresent => InstanceCreationError::ExtensionNotPresent,
Error::IncompatibleDriver => InstanceCreationError::IncompatibleDriver,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -615,9 +651,9 @@ impl<'a> PhysicalDevice<'a> {
pub fn from_index(instance: &'a Arc<Instance>, index: usize) -> Option<PhysicalDevice<'a>> {
if instance.physical_devices.len() > index {
Some(PhysicalDevice {
instance: instance,
device: index,
})
instance: instance,
device: index,
})
} else {
None
}
@ -651,11 +687,14 @@ impl<'a> PhysicalDevice<'a> {
/// Returns the human-readable name of the device.
#[inline]
pub fn name(&self) -> String { // FIXME: for some reason this panics if you use a `&str`
pub fn name(&self) -> String {
// FIXME: for some reason this panics if you use a `&str`
unsafe {
let val = self.infos().properties.deviceName;
let val = CStr::from_ptr(val.as_ptr());
val.to_str().expect("physical device name contained non-UTF8 characters").to_owned()
val.to_str()
.expect("physical device name contained non-UTF8 characters")
.to_owned()
}
}
@ -676,13 +715,15 @@ impl<'a> PhysicalDevice<'a> {
/// ```
#[inline]
pub fn ty(&self) -> PhysicalDeviceType {
match self.instance.physical_devices[self.device].properties.deviceType {
match self.instance.physical_devices[self.device]
.properties
.deviceType {
vk::PHYSICAL_DEVICE_TYPE_OTHER => PhysicalDeviceType::Other,
vk::PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU => PhysicalDeviceType::IntegratedGpu,
vk::PHYSICAL_DEVICE_TYPE_DISCRETE_GPU => PhysicalDeviceType::DiscreteGpu,
vk::PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU => PhysicalDeviceType::VirtualGpu,
vk::PHYSICAL_DEVICE_TYPE_CPU => PhysicalDeviceType::Cpu,
_ => panic!("Unrecognized Vulkan device type")
_ => panic!("Unrecognized Vulkan device type"),
}
}
@ -713,9 +754,9 @@ impl<'a> PhysicalDevice<'a> {
pub fn queue_family_by_id(&self, id: u32) -> Option<QueueFamily<'a>> {
if (id as usize) < self.infos().queue_families.len() {
Some(QueueFamily {
physical_device: *self,
id: id,
})
physical_device: *self,
id: id,
})
} else {
None
@ -736,9 +777,9 @@ impl<'a> PhysicalDevice<'a> {
pub fn memory_type_by_id(&self, id: u32) -> Option<MemoryType<'a>> {
if id < self.infos().memory.memoryTypeCount {
Some(MemoryType {
physical_device: *self,
id: id,
})
physical_device: *self,
id: id,
})
} else {
None
@ -759,9 +800,9 @@ impl<'a> PhysicalDevice<'a> {
pub fn memory_heap_by_id(&self, id: u32) -> Option<MemoryHeap<'a>> {
if id < self.infos().memory.memoryHeapCount {
Some(MemoryHeap {
physical_device: *self,
id: id,
})
physical_device: *self,
id: id,
})
} else {
None
@ -803,7 +844,8 @@ impl<'a> PhysicalDevice<'a> {
/// Can be stored in a configuration file, so that you can retrieve the device again the next
/// time the program is run.
#[inline]
pub fn uuid(&self) -> &[u8; 16] { // must be equal to vk::UUID_SIZE
pub fn uuid(&self) -> &[u8; 16] {
// must be equal to vk::UUID_SIZE
&self.infos().properties.pipelineCacheUUID
}
@ -925,8 +967,7 @@ impl<'a> QueueFamily<'a> {
/// > operations are ever added to Vulkan.
#[inline]
pub fn supports_transfers(&self) -> bool {
(self.flags() & vk::QUEUE_TRANSFER_BIT) != 0 ||
self.supports_graphics() ||
(self.flags() & vk::QUEUE_TRANSFER_BIT) != 0 || self.supports_graphics() ||
self.supports_compute()
}
@ -976,7 +1017,8 @@ impl<'a> Iterator for QueueFamiliesIter<'a> {
}
}
impl<'a> ExactSizeIterator for QueueFamiliesIter<'a> {}
impl<'a> ExactSizeIterator for QueueFamiliesIter<'a> {
}
/// Represents a memory type in a physical device.
#[derive(Debug, Copy, Clone)]
@ -1002,7 +1044,10 @@ impl<'a> MemoryType<'a> {
#[inline]
pub fn heap(&self) -> MemoryHeap<'a> {
let heap_id = self.physical_device.infos().memory.memoryTypes[self.id as usize].heapIndex;
MemoryHeap { physical_device: self.physical_device, id: heap_id }
MemoryHeap {
physical_device: self.physical_device,
id: heap_id,
}
}
/// Returns true if the memory type is located on the device, which means that it's the most
@ -1087,7 +1132,8 @@ impl<'a> Iterator for MemoryTypesIter<'a> {
}
}
impl<'a> ExactSizeIterator for MemoryTypesIter<'a> {}
impl<'a> ExactSizeIterator for MemoryTypesIter<'a> {
}
/// Represents a memory heap in a physical device.
#[derive(Debug, Copy, Clone)]
@ -1156,7 +1202,8 @@ impl<'a> Iterator for MemoryHeapsIter<'a> {
}
}
impl<'a> ExactSizeIterator for MemoryHeapsIter<'a> {}
impl<'a> ExactSizeIterator for MemoryHeapsIter<'a> {
}
/// Limits of a physical device.
pub struct Limits<'a> {
@ -1300,12 +1347,12 @@ mod tests {
let phys = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};
let queue_family = match phys.queue_families().next() {
Some(q) => q,
None => return
None => return,
};
let by_id = phys.queue_family_by_id(queue_family.id()).unwrap();

View File

@ -8,18 +8,18 @@
// according to those terms.
use std::error;
use std::fmt;
use std::ffi::CStr;
use std::fmt;
use std::ptr;
use std::vec::IntoIter;
use check_errors;
use OomError;
use Error;
use vk;
use OomError;
use check_errors;
use instance::loader;
use instance::loader::LoadingError;
use version::Version;
use vk;
/// Queries the list of layers that are available when creating an instance.
///
@ -28,7 +28,7 @@ use version::Version;
/// to pass its name (returned by `LayerProperties::name()`) when creating the
/// [`Instance`](struct.Instance.html).
///
/// This function returns an error if it failed to load the Vulkan library.
/// This function returns an error if it failed to load the Vulkan library.
///
/// > **Note**: It is possible that one of the layers enumerated here is no longer available when
/// > you create the `Instance`. This will lead to an error when calling `Instance::new`. The
@ -46,22 +46,21 @@ use version::Version;
/// ```
pub fn layers_list() -> Result<LayersIterator, LayersListError> {
unsafe {
let entry_points = try!(loader::entry_points());
let entry_points = loader::entry_points()?;
let mut num = 0;
try!(check_errors({
entry_points.EnumerateInstanceLayerProperties(&mut num, ptr::null_mut())
}));
check_errors({
entry_points.EnumerateInstanceLayerProperties(&mut num, ptr::null_mut())
})?;
let mut layers: Vec<vk::LayerProperties> = Vec::with_capacity(num as usize);
try!(check_errors({
entry_points.EnumerateInstanceLayerProperties(&mut num, layers.as_mut_ptr())
}));
check_errors({
entry_points
.EnumerateInstanceLayerProperties(&mut num, layers.as_mut_ptr())
})?;
layers.set_len(num as usize);
Ok(LayersIterator {
iter: layers.into_iter()
})
Ok(LayersIterator { iter: layers.into_iter() })
}
}
@ -87,7 +86,11 @@ impl LayerProperties {
/// ```
#[inline]
pub fn name(&self) -> &str {
unsafe { CStr::from_ptr(self.props.layerName.as_ptr()).to_str().unwrap() }
unsafe {
CStr::from_ptr(self.props.layerName.as_ptr())
.to_str()
.unwrap()
}
}
/// Returns a description of the layer.
@ -105,7 +108,11 @@ impl LayerProperties {
/// ```
#[inline]
pub fn description(&self) -> &str {
unsafe { CStr::from_ptr(self.props.description.as_ptr()).to_str().unwrap() }
unsafe {
CStr::from_ptr(self.props.description.as_ptr())
.to_str()
.unwrap()
}
}
/// Returns the version of Vulkan supported by this layer.
@ -200,7 +207,7 @@ impl From<Error> for LayersListError {
match err {
err @ Error::OutOfHostMemory => LayersListError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => LayersListError::OomError(OomError::from(err)),
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -208,7 +215,7 @@ impl From<Error> for LayersListError {
/// Iterator that produces the list of layers that are available.
// TODO: #[derive(Debug, Clone)]
pub struct LayersIterator {
iter: IntoIter<vk::LayerProperties>
iter: IntoIter<vk::LayerProperties>,
}
impl Iterator for LayersIterator {
@ -236,7 +243,7 @@ mod tests {
fn layers_list() {
let mut list = match instance::layers_list() {
Ok(l) => l,
Err(_) => return
Err(_) => return,
};
while let Some(_) = list.next() {}

View File

@ -20,22 +20,17 @@ use vk;
fn load_static() -> Result<vk::Static, LoadingError> {
use std::os::raw::c_char;
extern {
extern "C" {
fn vkGetInstanceProcAddr(instance: vk::Instance, pName: *const c_char)
-> vk::PFN_vkVoidFunction;
}
extern "system" fn wrapper(instance: vk::Instance, pName: *const c_char)
-> vk::PFN_vkVoidFunction
{
unsafe {
vkGetInstanceProcAddr(instance, pName)
}
-> vk::PFN_vkVoidFunction {
unsafe { vkGetInstanceProcAddr(instance, pName) }
}
Ok(vk::Static {
GetInstanceProcAddr: wrapper,
})
Ok(vk::Static { GetInstanceProcAddr: wrapper })
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
@ -58,10 +53,11 @@ fn load_static() -> Result<vk::Static, LoadingError> {
let name = name.to_str().unwrap();
match lib.symbol(name) {
Ok(s) => s,
Err(_) => { // TODO: return error?
Err(_) => {
// TODO: return error?
err = Some(LoadingError::MissingEntryPoint(name.to_owned()));
ptr::null()
}
},
}
});
@ -109,7 +105,7 @@ pub fn entry_points() -> Result<&'static vk::EntryPoints, LoadingError> {
#[derive(Debug, Clone)]
pub enum LoadingError {
/// Failed to load the Vulkan shared library.
LibraryLoadFailure(String), // TODO: meh for error type, but this needs changes in shared_library
LibraryLoadFailure(String), // TODO: meh for error type, but this needs changes in shared_library
/// One of the entry points required to be supported by the Vulkan implementation is missing.
MissingEntryPoint(String),

View File

@ -16,7 +16,7 @@
//! ```no_run
//! use vulkano::instance::Instance;
//! use vulkano::instance::InstanceExtensions;
//!
//!
//! let instance = match Instance::new(None, &InstanceExtensions::none(), None) {
//! Ok(i) => i,
//! Err(err) => panic!("Couldn't build instance: {:?}", err)
@ -88,7 +88,7 @@
//! // Builds an `ApplicationInfo` by looking at the content of the `Cargo.toml` file at
//! // compile-time.
//! let app_infos = ApplicationInfo::from_cargo_toml();
//!
//!
//! let _instance = Instance::new(Some(&app_infos), &InstanceExtensions::none(), None).unwrap();
//! ```
//!
@ -104,29 +104,30 @@
//! Once you have chosen a physical device, you can create a `Device` object from it. See the
//! `device` module for more info.
//!
pub use features::Features;
pub use self::extensions::DeviceExtensions;
pub use self::extensions::InstanceExtensions;
pub use self::extensions::RawDeviceExtensions;
pub use self::extensions::RawInstanceExtensions;
pub use self::instance::ApplicationInfo;
pub use self::instance::Instance;
pub use self::instance::InstanceCreationError;
pub use self::instance::ApplicationInfo;
pub use self::instance::Limits;
pub use self::instance::MemoryHeap;
pub use self::instance::MemoryHeapsIter;
pub use self::instance::MemoryType;
pub use self::instance::MemoryTypesIter;
pub use self::instance::PhysicalDevice;
pub use self::instance::PhysicalDevicesIter;
pub use self::instance::PhysicalDeviceType;
pub use self::instance::PhysicalDevicesIter;
pub use self::instance::QueueFamiliesIter;
pub use self::instance::QueueFamily;
pub use self::instance::MemoryTypesIter;
pub use self::instance::MemoryType;
pub use self::instance::MemoryHeapsIter;
pub use self::instance::MemoryHeap;
pub use self::instance::Limits;
pub use self::layers::layers_list;
pub use self::layers::LayerProperties;
pub use self::layers::LayersIterator;
pub use self::layers::LayersListError;
pub use self::layers::layers_list;
pub use self::loader::LoadingError;
pub use features::Features;
pub use version::Version;
pub mod debug;

View File

@ -58,8 +58,8 @@
//!
//#![warn(missing_docs)] // TODO: activate
#![allow(dead_code)] // TODO: remove
#![allow(unused_variables)] // TODO: remove
#![allow(dead_code)] // TODO: remove
#![allow(unused_variables)] // TODO: remove
extern crate crossbeam;
extern crate fnv;
@ -99,9 +99,12 @@ use std::sync::MutexGuard;
/// Alternative to the `Deref` trait. Contrary to `Deref`, must always return the same object.
pub unsafe trait SafeDeref: Deref {}
unsafe impl<'a, T: ?Sized> SafeDeref for &'a T {}
unsafe impl<T: ?Sized> SafeDeref for Arc<T> {}
unsafe impl<T: ?Sized> SafeDeref for Box<T> {}
unsafe impl<'a, T: ?Sized> SafeDeref for &'a T {
}
unsafe impl<T: ?Sized> SafeDeref for Arc<T> {
}
unsafe impl<T: ?Sized> SafeDeref for Box<T> {
}
/// Gives access to the internal identifier of an object.
pub unsafe trait VulkanObject {
@ -154,7 +157,7 @@ impl From<Error> for OomError {
match err {
Error::OutOfHostMemory => OomError::OutOfHostMemory,
Error::OutOfDeviceMemory => OomError::OutOfDeviceMemory,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -178,7 +181,7 @@ enum Success {
/// panic for error code that arent supposed to happen.
#[derive(Debug, Copy, Clone)]
#[repr(u32)]
#[doc(hidden)] // TODO: this is necessary because of the stupid visibility rules in rustc
#[doc(hidden)] // TODO: this is necessary because of the stupid visibility rules in rustc
pub enum Error {
OutOfHostMemory = vk::ERROR_OUT_OF_HOST_MEMORY,
OutOfDeviceMemory = vk::ERROR_OUT_OF_DEVICE_MEMORY,
@ -226,7 +229,8 @@ fn check_errors(result: vk::Result) -> Result<Success, Error> {
vk::ERROR_INCOMPATIBLE_DISPLAY_KHR => Err(Error::IncompatibleDisplay),
vk::ERROR_VALIDATION_FAILED_EXT => Err(Error::ValidationFailed),
vk::ERROR_OUT_OF_POOL_MEMORY_KHR => Err(Error::OutOfPoolMemory),
vk::ERROR_INVALID_SHADER_NV => panic!("Vulkan function returned VK_ERROR_INVALID_SHADER_NV"),
c => unreachable!("Unexpected error code returned by Vulkan: {}", c)
vk::ERROR_INVALID_SHADER_NV => panic!("Vulkan function returned \
VK_ERROR_INVALID_SHADER_NV"),
c => unreachable!("Unexpected error code returned by Vulkan: {}", c),
}
}

View File

@ -9,20 +9,20 @@
use std::fmt;
use std::mem;
use std::ptr;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;
use instance::MemoryType;
use device::Device;
use device::DeviceOwned;
use memory::Content;
use OomError;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use instance::MemoryType;
use memory::Content;
use vk;
/// Represents memory that has been allocated.
@ -36,7 +36,7 @@ use vk;
///
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// let mem_ty = device.physical_device().memory_types().next().unwrap();
///
///
/// // Allocates 1kB of memory.
/// let memory = DeviceMemory::alloc(device.clone(), mem_ty, 1024).unwrap();
/// ```
@ -51,7 +51,7 @@ impl DeviceMemory {
/// Allocates a chunk of memory from the device.
///
/// Some platforms may have a limit on the maximum size of a single allocation. For example,
/// certain systems may fail to create allocations with a size greater than or equal to 4GB.
/// certain systems may fail to create allocations with a size greater than or equal to 4GB.
///
/// # Panic
///
@ -61,8 +61,7 @@ impl DeviceMemory {
// TODO: VK_ERROR_TOO_MANY_OBJECTS error
#[inline]
pub fn alloc(device: Arc<Device>, memory_type: MemoryType, size: usize)
-> Result<DeviceMemory, OomError>
{
-> Result<DeviceMemory, OomError> {
assert!(size >= 1);
assert_eq!(device.physical_device().internal_object(),
memory_type.physical_device().internal_object());
@ -85,17 +84,19 @@ impl DeviceMemory {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.AllocateMemory(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.AllocateMemory(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(DeviceMemory {
memory: memory,
device: device,
size: size,
memory_type_index: memory_type.id(),
})
memory: memory,
device: device,
size: size,
memory_type_index: memory_type.id(),
})
}
/// Allocates a chunk of memory and maps it.
@ -106,34 +107,39 @@ impl DeviceMemory {
/// - Panics if the memory type is not host-visible.
///
pub fn alloc_and_map(device: Arc<Device>, memory_type: MemoryType, size: usize)
-> Result<MappedDeviceMemory, OomError>
{
-> Result<MappedDeviceMemory, OomError> {
let vk = device.pointers();
assert!(memory_type.is_host_visible());
let mem = try!(DeviceMemory::alloc(device.clone(), memory_type, size));
let mem = DeviceMemory::alloc(device.clone(), memory_type, size)?;
let coherent = memory_type.is_host_coherent();
let ptr = unsafe {
let mut output = mem::uninitialized();
try!(check_errors(vk.MapMemory(device.internal_object(), mem.memory, 0,
mem.size as vk::DeviceSize, 0 /* reserved flags */,
&mut output)));
check_errors(vk.MapMemory(device.internal_object(),
mem.memory,
0,
mem.size as vk::DeviceSize,
0, /* reserved flags */
&mut output))?;
output
};
Ok(MappedDeviceMemory {
memory: mem,
pointer: ptr,
coherent: coherent,
})
memory: mem,
pointer: ptr,
coherent: coherent,
})
}
/// Returns the memory type this chunk was allocated on.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device.physical_device().memory_type_by_id(self.memory_type_index).unwrap()
self.device
.physical_device()
.memory_type_by_id(self.memory_type_index)
.unwrap()
}
/// Returns the size in bytes of that memory chunk.
@ -185,7 +191,7 @@ impl Drop for DeviceMemory {
/// is not host-accessible.
///
/// In order to access the content of the allocated memory, you can use the `read_write` method.
/// This method returns a guard object that derefs to the content.
/// This method returns a guard object that derefs to the content.
///
/// # Example
///
@ -193,7 +199,7 @@ impl Drop for DeviceMemory {
/// use vulkano::memory::DeviceMemory;
///
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// // The memory type must be mappable.
/// // The memory type must be mappable.
/// let mem_ty = device.physical_device().memory_types()
/// .filter(|t| t.is_host_visible())
/// .next().unwrap(); // Vk specs guarantee that this can't fail
@ -253,7 +259,8 @@ impl MappedDeviceMemory {
{
let vk = self.memory.device().pointers();
let pointer = T::ref_from_ptr((self.pointer as usize + range.start) as *mut _,
range.end - range.start).unwrap(); // TODO: error
range.end - range.start)
.unwrap(); // TODO: error
if !self.coherent {
let range = vk::MappedMemoryRange {
@ -298,8 +305,10 @@ unsafe impl DeviceOwned for MappedDeviceMemory {
}
}
unsafe impl Send for MappedDeviceMemory {}
unsafe impl Sync for MappedDeviceMemory {}
unsafe impl Send for MappedDeviceMemory {
}
unsafe impl Sync for MappedDeviceMemory {
}
impl fmt::Debug for MappedDeviceMemory {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
@ -334,13 +343,15 @@ impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
pointer: f(self.pointer),
mem: self.mem,
coherent: self.coherent,
range: self.range.clone(), // TODO: ?
range: self.range.clone(), // TODO: ?
}
}
}
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {
}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {
}
impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
type Target = T;
@ -375,8 +386,7 @@ impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {
// TODO: check result?
unsafe {
vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(),
1, &range);
vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(), 1, &range);
}
}
}
@ -406,30 +416,38 @@ mod tests {
#[cfg(target_pointer_width = "64")]
fn oom_single() {
let (device, _) = gfx_dev_and_queue!();
let mem_ty = device.physical_device().memory_types().filter(|m| !m.is_lazily_allocated())
.next().unwrap();
let mem_ty = device
.physical_device()
.memory_types()
.filter(|m| !m.is_lazily_allocated())
.next()
.unwrap();
match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
Err(OomError::OutOfDeviceMemory) => (),
_ => panic!()
_ => panic!(),
}
}
#[test]
#[ignore] // TODO: test fails for now on Mesa+Intel
#[ignore] // TODO: test fails for now on Mesa+Intel
fn oom_multi() {
let (device, _) = gfx_dev_and_queue!();
let mem_ty = device.physical_device().memory_types().filter(|m| !m.is_lazily_allocated())
.next().unwrap();
let mem_ty = device
.physical_device()
.memory_types()
.filter(|m| !m.is_lazily_allocated())
.next()
.unwrap();
let heap_size = mem_ty.heap().size();
let mut allocs = Vec::new();
for _ in 0 .. 4 {
match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
Err(OomError::OutOfDeviceMemory) => return, // test succeeded
Ok(a) => allocs.push(a),
_ => ()
_ => (),
}
}

View File

@ -66,13 +66,13 @@
//!
//! ```
//! use vulkano::memory::DeviceMemory;
//!
//!
//! # let device: std::sync::Arc<vulkano::device::Device> = return;
//! // Taking the first memory type for the sake of this example.
//! let ty = device.physical_device().memory_types().next().unwrap();
//!
//!
//! let alloc = DeviceMemory::alloc(device.clone(), ty, 1024).expect("Failed to allocate memory");
//!
//!
//! // The memory is automatically free'd when `alloc` is destroyed.
//! ```
//!

View File

@ -12,12 +12,12 @@ use std::ops::Range;
use std::sync::Arc;
use std::sync::Mutex;
use OomError;
use device::Device;
use instance::Instance;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use OomError;
/// Memory pool that operates on a given memory type.
#[derive(Debug)]
@ -36,18 +36,17 @@ impl StdHostVisibleMemoryTypePool {
/// - Panics if the `device` and `memory_type` don't belong to the same physical device.
///
#[inline]
pub fn new(device: Arc<Device>, memory_type: MemoryType)
-> Arc<StdHostVisibleMemoryTypePool>
{
pub fn new(device: Arc<Device>, memory_type: MemoryType) -> Arc<StdHostVisibleMemoryTypePool> {
assert_eq!(&**device.physical_device().instance() as *const Instance,
&**memory_type.physical_device().instance() as *const Instance);
assert_eq!(device.physical_device().index(), memory_type.physical_device().index());
assert_eq!(device.physical_device().index(),
memory_type.physical_device().index());
Arc::new(StdHostVisibleMemoryTypePool {
device: device.clone(),
memory_type: memory_type.id(),
occupied: Mutex::new(Vec::new()),
})
device: device.clone(),
memory_type: memory_type.id(),
occupied: Mutex::new(Vec::new()),
})
}
/// Allocates memory from the pool.
@ -58,12 +57,14 @@ impl StdHostVisibleMemoryTypePool {
/// - Panics if `alignment` is 0.
///
pub fn alloc(me: &Arc<Self>, size: usize, alignment: usize)
-> Result<StdHostVisibleMemoryTypePoolAlloc, OomError>
{
-> Result<StdHostVisibleMemoryTypePoolAlloc, OomError> {
assert!(size != 0);
assert!(alignment != 0);
#[inline] fn align(val: usize, al: usize) -> usize { al * (1 + (val - 1) / al) }
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}
// Find a location.
let mut occupied = me.occupied.lock().unwrap();
@ -78,11 +79,11 @@ impl StdHostVisibleMemoryTypePool {
if entry1_end + size <= entry2.start {
entries.insert(i + 1, entry1_end .. entry1_end + size);
return Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size: size,
});
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size: size,
});
}
}
@ -91,29 +92,30 @@ impl StdHostVisibleMemoryTypePool {
if last_end + size <= (**dev_mem).as_ref().size() {
entries.push(last_end .. last_end + size);
return Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size: size,
});
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size: size,
});
}
}
// We need to allocate a new block.
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block = try!(DeviceMemory::alloc_and_map(me.device.clone(), me.memory_type(), to_alloc));
let new_block =
DeviceMemory::alloc_and_map(me.device.clone(), me.memory_type(), to_alloc)?;
Arc::new(new_block)
};
occupied.push((new_block.clone(), vec![0 .. size]));
Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: new_block,
offset: 0,
size: size,
})
pool: me.clone(),
memory: new_block,
offset: 0,
size: size,
})
}
/// Returns the device this pool operates on.
@ -125,7 +127,10 @@ impl StdHostVisibleMemoryTypePool {
/// Returns the memory type this pool operates on.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device.physical_device().memory_type_by_id(self.memory_type).unwrap()
self.device
.physical_device()
.memory_type_by_id(self.memory_type)
.unwrap()
}
}
@ -158,8 +163,10 @@ impl Drop for StdHostVisibleMemoryTypePoolAlloc {
fn drop(&mut self) {
let mut occupied = self.pool.occupied.lock().unwrap();
let entries = occupied.iter_mut()
.find(|e| &*e.0 as *const MappedDeviceMemory == &*self.memory).unwrap();
let entries = occupied
.iter_mut()
.find(|e| &*e.0 as *const MappedDeviceMemory == &*self.memory)
.unwrap();
entries.1.retain(|e| e.start != self.offset);
}

View File

@ -7,17 +7,17 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use OomError;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use OomError;
pub use self::pool::StdMemoryPool;
pub use self::pool::StdMemoryPoolAlloc;
pub use self::host_visible::StdHostVisibleMemoryTypePool;
pub use self::host_visible::StdHostVisibleMemoryTypePoolAlloc;
pub use self::non_host_visible::StdNonHostVisibleMemoryTypePool;
pub use self::non_host_visible::StdNonHostVisibleMemoryTypePoolAlloc;
pub use self::pool::StdMemoryPool;
pub use self::pool::StdMemoryPoolAlloc;
mod host_visible;
mod non_host_visible;

View File

@ -12,11 +12,11 @@ use std::ops::Range;
use std::sync::Arc;
use std::sync::Mutex;
use OomError;
use device::Device;
use instance::Instance;
use instance::MemoryType;
use memory::DeviceMemory;
use OomError;
/// Memory pool that operates on a given memory type.
#[derive(Debug)]
@ -36,17 +36,17 @@ impl StdNonHostVisibleMemoryTypePool {
///
#[inline]
pub fn new(device: Arc<Device>, memory_type: MemoryType)
-> Arc<StdNonHostVisibleMemoryTypePool>
{
-> Arc<StdNonHostVisibleMemoryTypePool> {
assert_eq!(&**device.physical_device().instance() as *const Instance,
&**memory_type.physical_device().instance() as *const Instance);
assert_eq!(device.physical_device().index(), memory_type.physical_device().index());
assert_eq!(device.physical_device().index(),
memory_type.physical_device().index());
Arc::new(StdNonHostVisibleMemoryTypePool {
device: device.clone(),
memory_type: memory_type.id(),
occupied: Mutex::new(Vec::new()),
})
device: device.clone(),
memory_type: memory_type.id(),
occupied: Mutex::new(Vec::new()),
})
}
/// Allocates memory from the pool.
@ -57,12 +57,14 @@ impl StdNonHostVisibleMemoryTypePool {
/// - Panics if `alignment` is 0.
///
pub fn alloc(me: &Arc<Self>, size: usize, alignment: usize)
-> Result<StdNonHostVisibleMemoryTypePoolAlloc, OomError>
{
-> Result<StdNonHostVisibleMemoryTypePoolAlloc, OomError> {
assert!(size != 0);
assert!(alignment != 0);
#[inline] fn align(val: usize, al: usize) -> usize { al * (1 + (val - 1) / al) }
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}
// Find a location.
let mut occupied = me.occupied.lock().unwrap();
@ -77,11 +79,11 @@ impl StdNonHostVisibleMemoryTypePool {
if entry1_end + size <= entry2.start {
entries.insert(i + 1, entry1_end .. entry1_end + size);
return Ok(StdNonHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size: size,
});
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size: size,
});
}
}
@ -90,29 +92,29 @@ impl StdNonHostVisibleMemoryTypePool {
if last_end + size <= dev_mem.size() {
entries.push(last_end .. last_end + size);
return Ok(StdNonHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size: size,
});
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size: size,
});
}
}
// We need to allocate a new block.
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block = try!(DeviceMemory::alloc(me.device.clone(), me.memory_type(), to_alloc));
let new_block = DeviceMemory::alloc(me.device.clone(), me.memory_type(), to_alloc)?;
Arc::new(new_block)
};
occupied.push((new_block.clone(), vec![0 .. size]));
Ok(StdNonHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: new_block,
offset: 0,
size: size,
})
pool: me.clone(),
memory: new_block,
offset: 0,
size: size,
})
}
/// Returns the device this pool operates on.
@ -124,7 +126,10 @@ impl StdNonHostVisibleMemoryTypePool {
/// Returns the memory type this pool operates on.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device.physical_device().memory_type_by_id(self.memory_type).unwrap()
self.device
.physical_device()
.memory_type_by_id(self.memory_type)
.unwrap()
}
}
@ -157,8 +162,10 @@ impl Drop for StdNonHostVisibleMemoryTypePoolAlloc {
fn drop(&mut self) {
let mut occupied = self.pool.occupied.lock().unwrap();
let entries = occupied.iter_mut()
.find(|e| &*e.0 as *const DeviceMemory == &*self.memory).unwrap();
let entries = occupied
.iter_mut()
.find(|e| &*e.0 as *const DeviceMemory == &*self.memory)
.unwrap();
entries.1.retain(|e| e.start != self.offset);
}

View File

@ -7,15 +7,18 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use fnv::FnvHasher;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::hash::BuildHasherDefault;
use std::sync::Arc;
use std::sync::Mutex;
use fnv::FnvHasher;
use OomError;
use device::Device;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use memory::pool::AllocLayout;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
@ -23,9 +26,6 @@ use memory::pool::StdHostVisibleMemoryTypePool;
use memory::pool::StdHostVisibleMemoryTypePoolAlloc;
use memory::pool::StdNonHostVisibleMemoryTypePool;
use memory::pool::StdNonHostVisibleMemoryTypePoolAlloc;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use OomError;
#[derive(Debug)]
pub struct StdMemoryPool {
@ -43,32 +43,37 @@ impl StdMemoryPool {
let hasher = BuildHasherDefault::<FnvHasher>::default();
Arc::new(StdMemoryPool {
device: device.clone(),
pools: Mutex::new(HashMap::with_capacity_and_hasher(cap, hasher)),
})
device: device.clone(),
pools: Mutex::new(HashMap::with_capacity_and_hasher(cap, hasher)),
})
}
}
unsafe impl MemoryPool for Arc<StdMemoryPool> {
type Alloc = StdMemoryPoolAlloc;
fn alloc(&self, memory_type: MemoryType, size: usize, alignment: usize,
layout: AllocLayout) -> Result<StdMemoryPoolAlloc, OomError>
{
fn alloc(&self, memory_type: MemoryType, size: usize, alignment: usize, layout: AllocLayout)
-> Result<StdMemoryPoolAlloc, OomError> {
let mut pools = self.pools.lock().unwrap();
match pools.entry((memory_type.id(), layout)) {
Entry::Occupied(entry) => {
match entry.get() {
&Pool::HostVisible(ref pool) => {
let alloc = try!(StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
&Pool::NonHostVisible(ref pool) => {
let alloc = try!(StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
}
},
@ -76,18 +81,26 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
Entry::Vacant(entry) => {
match memory_type.is_host_visible() {
true => {
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(),
memory_type);
entry.insert(Pool::HostVisible(pool.clone()));
let alloc = try!(StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
false => {
let pool = StdNonHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
let pool = StdNonHostVisibleMemoryTypePool::new(self.device.clone(),
memory_type);
entry.insert(Pool::NonHostVisible(pool.clone()));
let alloc = try!(StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
}
},

View File

@ -165,10 +165,18 @@ impl Into<vk::PipelineColorBlendAttachmentState> for AttachmentBlend {
alphaBlendOp: self.alpha_op as u32,
colorWriteMask: {
let mut mask = 0;
if self.mask_red { mask |= vk::COLOR_COMPONENT_R_BIT; }
if self.mask_green { mask |= vk::COLOR_COMPONENT_G_BIT; }
if self.mask_blue { mask |= vk::COLOR_COMPONENT_B_BIT; }
if self.mask_alpha { mask |= vk::COLOR_COMPONENT_A_BIT; }
if self.mask_red {
mask |= vk::COLOR_COMPONENT_R_BIT;
}
if self.mask_green {
mask |= vk::COLOR_COMPONENT_G_BIT;
}
if self.mask_blue {
mask |= vk::COLOR_COMPONENT_B_BIT;
}
if self.mask_alpha {
mask |= vk::COLOR_COMPONENT_A_BIT;
}
mask
},
}

View File

@ -8,20 +8,21 @@
// according to those terms.
//! Cache the pipeline objects to disk for faster reloads.
//!
//!
//! A pipeline cache is an opaque type that allow you to cache your graphics and compute
//! pipelines on the disk.
//!
//!
//! You can create either an empty cache or a cache from some initial data. Whenever you create a
//! graphics or compute pipeline, you have the possibility to pass a reference to that cache.
//! TODO: ^ that's not the case yet
//! The Vulkan implementation will then look in the cache for an existing entry, or add one if it
//! doesn't exist.
//!
//!
//! Once that is done, you can extract the data from the cache and store it. See the documentation
//! of [`get_data`](struct.PipelineCache.html#method.get_data) for example of how to store the data
//! on the disk, and [`with_data`](struct.PipelineCache.html#method.with_data) for how to reload it.
//!
use std::mem;
use std::ptr;
use std::sync::Arc;
@ -82,8 +83,7 @@ impl PipelineCache {
/// ```
#[inline]
pub unsafe fn with_data(device: Arc<Device>, initial_data: &[u8])
-> Result<Arc<PipelineCache>, OomError>
{
-> Result<Arc<PipelineCache>, OomError> {
PipelineCache::new_impl(device, Some(initial_data))
}
@ -105,29 +105,32 @@ impl PipelineCache {
// Actual implementation of the constructor.
unsafe fn new_impl(device: Arc<Device>, initial_data: Option<&[u8]>)
-> Result<Arc<PipelineCache>, OomError>
{
-> Result<Arc<PipelineCache>, OomError> {
let vk = device.pointers();
let cache = {
let infos = vk::PipelineCacheCreateInfo {
sType: vk::STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
initialDataSize: initial_data.map(|d| d.len()).unwrap_or(0),
pInitialData: initial_data.map(|d| d.as_ptr() as *const _).unwrap_or(ptr::null()),
pInitialData: initial_data
.map(|d| d.as_ptr() as *const _)
.unwrap_or(ptr::null()),
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreatePipelineCache(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreatePipelineCache(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(Arc::new(PipelineCache {
device: device.clone(),
cache: cache,
}))
device: device.clone(),
cache: cache,
}))
}
/// Merges other pipeline caches into this one.
@ -146,13 +149,18 @@ impl PipelineCache {
unsafe {
let vk = self.device.pointers();
let pipelines = pipelines.into_iter().map(|pipeline| {
assert!(&***pipeline as *const _ != &*self as *const _);
pipeline.cache
}).collect::<Vec<_>>();
let pipelines = pipelines
.into_iter()
.map(|pipeline| {
assert!(&***pipeline as *const _ != &*self as *const _);
pipeline.cache
})
.collect::<Vec<_>>();
try!(check_errors(vk.MergePipelineCaches(self.device.internal_object(), self.cache,
pipelines.len() as u32, pipelines.as_ptr())));
check_errors(vk.MergePipelineCaches(self.device.internal_object(),
self.cache,
pipelines.len() as u32,
pipelines.as_ptr()))?;
Ok(())
}
@ -191,12 +199,16 @@ impl PipelineCache {
let vk = self.device.pointers();
let mut num = 0;
try!(check_errors(vk.GetPipelineCacheData(self.device.internal_object(), self.cache,
&mut num, ptr::null_mut())));
check_errors(vk.GetPipelineCacheData(self.device.internal_object(),
self.cache,
&mut num,
ptr::null_mut()))?;
let mut data: Vec<u8> = Vec::with_capacity(num as usize);
try!(check_errors(vk.GetPipelineCacheData(self.device.internal_object(), self.cache,
&mut num, data.as_mut_ptr() as *mut _)));
check_errors(vk.GetPipelineCacheData(self.device.internal_object(),
self.cache,
&mut num,
data.as_mut_ptr() as *mut _))?;
data.set_len(num as usize);
Ok(data)

View File

@ -17,24 +17,24 @@ use std::sync::Arc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutSys;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use descriptor::pipeline_layout::PipelineLayoutSuperset;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutNotSupersetError;
use descriptor::pipeline_layout::PipelineLayoutSuperset;
use descriptor::pipeline_layout::PipelineLayoutSys;
use pipeline::shader::ComputeShaderEntryPoint;
use pipeline::shader::SpecializationConstants;
use device::Device;
use device::DeviceOwned;
use Error;
use OomError;
use SafeDeref;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use vk;
/// A pipeline object that describes to the Vulkan implementation how it should perform compute
@ -56,15 +56,17 @@ struct Inner {
impl ComputePipeline<()> {
/// Builds a new `ComputePipeline`.
pub fn new<Css, Csl>(device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>,
specialization: &Css)
-> Result<ComputePipeline<PipelineLayout<Csl>>, ComputePipelineCreationError>
pub fn new<Css, Csl>(
device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>, specialization: &Css)
-> Result<ComputePipeline<PipelineLayout<Csl>>, ComputePipelineCreationError>
where Csl: PipelineLayoutDescNames + Clone,
Css: SpecializationConstants
{
unsafe {
let pipeline_layout = shader.layout().clone().build(device.clone())?;
ComputePipeline::with_unchecked_pipeline_layout(device, shader, specialization,
ComputePipeline::with_unchecked_pipeline_layout(device,
shader,
specialization,
pipeline_layout)
}
}
@ -75,32 +77,32 @@ impl<Pl> ComputePipeline<Pl> {
///
/// An error will be returned if the pipeline layout isn't a superset of what the shader
/// uses.
pub fn with_pipeline_layout<Css, Csl>(device: Arc<Device>,
shader: &ComputeShaderEntryPoint<Css, Csl>,
specialization: &Css,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
pub fn with_pipeline_layout<Css, Csl>(
device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>, specialization: &Css,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
where Csl: PipelineLayoutDescNames + Clone,
Css: SpecializationConstants,
Pl: PipelineLayoutAbstract,
Pl: PipelineLayoutAbstract
{
unsafe {
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout, shader.layout())?;
ComputePipeline::with_unchecked_pipeline_layout(device, shader, specialization,
ComputePipeline::with_unchecked_pipeline_layout(device,
shader,
specialization,
pipeline_layout)
}
}
/// Same as `with_pipeline_layout`, but doesn't check whether the pipeline layout is a
/// superset of what the shader expects.
pub unsafe fn with_unchecked_pipeline_layout<Css, Csl>(device: Arc<Device>,
shader: &ComputeShaderEntryPoint<Css, Csl>,
specialization: &Css,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
pub unsafe fn with_unchecked_pipeline_layout<Css, Csl>(
device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>, specialization: &Css,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
where Csl: PipelineLayoutDescNames + Clone,
Css: SpecializationConstants,
Pl: PipelineLayoutAbstract,
Pl: PipelineLayoutAbstract
{
let vk = device.pointers();
@ -138,18 +140,22 @@ impl<Pl> ComputePipeline<Pl> {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateComputePipelines(device.internal_object(), 0,
1, &infos, ptr::null(), &mut output)));
check_errors(vk.CreateComputePipelines(device.internal_object(),
0,
1,
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(ComputePipeline {
inner: Inner {
device: device.clone(),
pipeline: pipeline,
},
pipeline_layout: pipeline_layout,
})
inner: Inner {
device: device.clone(),
pipeline: pipeline,
},
pipeline_layout: pipeline_layout,
})
}
}
@ -190,7 +196,8 @@ unsafe impl<Pl> ComputePipelineAbstract for ComputePipeline<Pl>
}
unsafe impl<T> ComputePipelineAbstract for T
where T: SafeDeref, T::Target: ComputePipelineAbstract
where T: SafeDeref,
T::Target: ComputePipelineAbstract
{
#[inline]
fn inner(&self) -> ComputePipelineSys {
@ -212,7 +219,9 @@ unsafe impl<'a> VulkanObject for ComputePipelineSys<'a> {
}
}
unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl> where Pl: PipelineLayoutAbstract {
unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl>
where Pl: PipelineLayoutAbstract
{
#[inline]
fn sys(&self) -> PipelineLayoutSys {
self.layout().sys()
@ -224,7 +233,9 @@ unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl> where Pl: Pipelin
}
}
unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl> where Pl: PipelineLayoutDesc {
unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl>
where Pl: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
self.pipeline_layout.num_sets()
@ -251,7 +262,9 @@ unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl> where Pl: PipelineLay
}
}
unsafe impl<Pl> PipelineLayoutDescNames for ComputePipeline<Pl> where Pl: PipelineLayoutDescNames {
unsafe impl<Pl> PipelineLayoutDescNames for ComputePipeline<Pl>
where Pl: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
self.pipeline_layout.descriptor_by_name(name)
@ -301,12 +314,10 @@ impl error::Error for ComputePipelineCreationError {
fn description(&self) -> &str {
match *self {
ComputePipelineCreationError::OomError(_) => "not enough memory available",
ComputePipelineCreationError::PipelineLayoutCreationError(_) => "error while creating \
the pipeline layout \
object",
ComputePipelineCreationError::IncompatiblePipelineLayout(_) => "the pipeline layout is \
not compatible with what \
the shader expects",
ComputePipelineCreationError::PipelineLayoutCreationError(_) =>
"error while creating the pipeline layout object",
ComputePipelineCreationError::IncompatiblePipelineLayout(_) =>
"the pipeline layout is not compatible with what the shader expects",
}
}
@ -358,7 +369,7 @@ impl From<Error> for ComputePipelineCreationError {
err @ Error::OutOfDeviceMemory => {
ComputePipelineCreationError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

View File

@ -8,14 +8,14 @@
// according to those terms.
//! Depth and stencil operations description.
//!
//!
//! After the fragment shader has finished running, each fragment goes through the depth
//! and stencil tests.
//!
//!
//! The depth test passes of fails depending on how the depth value of each fragment compares
//! to the existing depth value in the depth buffer at that fragment's location. Depth values
//! are always between 0.0 and 1.0.
//!
//!
//! The stencil test passes or fails depending on how a reference value compares to the existing
//! value in the stencil buffer at each fragment's location. Depending on the outcome of the
//! depth and stencil tests, the value of the stencil buffer at that location can be updated.
@ -135,10 +135,10 @@ impl Stencil {
pub fn always_keep(&self) -> bool {
match self.compare {
Compare::Always => self.pass_op == StencilOp::Keep &&
self.depth_fail_op == StencilOp::Keep,
self.depth_fail_op == StencilOp::Keep,
Compare::Never => self.fail_op == StencilOp::Keep,
_ => self.pass_op == StencilOp::Keep && self.fail_op == StencilOp::Keep &&
self.depth_fail_op == StencilOp::Keep,
self.depth_fail_op == StencilOp::Keep,
}
}
}
@ -178,7 +178,7 @@ pub enum DepthBounds {
/// The test is disabled. All fragments pass the depth bounds test.
Disabled,
/// Fragments that are within the given range do pass the test. Values are depth values
/// Fragments that are within the given range do pass the test. Values are depth values
/// between 0.0 and 1.0.
Fixed(Range<f32>),

View File

@ -11,7 +11,6 @@
// to avoid duplicating code, so we hide the warnings for now
#![allow(deprecated)]
use std::sync::Arc;
use descriptor::pipeline_layout::EmptyPipelineDesc;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
@ -19,9 +18,9 @@ use device::Device;
use framebuffer::RenderPassAbstract;
use framebuffer::RenderPassSubpassInterface;
use framebuffer::Subpass;
use pipeline::blend::Blend;
use pipeline::blend::AttachmentsBlend;
use pipeline::blend::AttachmentBlend;
use pipeline::blend::AttachmentsBlend;
use pipeline::blend::Blend;
use pipeline::blend::LogicOp;
use pipeline::depth_stencil::DepthStencil;
use pipeline::graphics_pipeline::GraphicsPipeline;
@ -36,23 +35,46 @@ use pipeline::raster::FrontFace;
use pipeline::raster::PolygonMode;
use pipeline::raster::Rasterization;
use pipeline::shader::EmptyShaderInterfaceDef;
use pipeline::shader::FragmentShaderEntryPoint;
use pipeline::shader::GeometryShaderEntryPoint;
use pipeline::shader::ShaderInterfaceDef;
use pipeline::shader::ShaderInterfaceDefMatch;
use pipeline::shader::VertexShaderEntryPoint;
use pipeline::shader::TessControlShaderEntryPoint;
use pipeline::shader::TessEvaluationShaderEntryPoint;
use pipeline::shader::GeometryShaderEntryPoint;
use pipeline::shader::FragmentShaderEntryPoint;
use pipeline::shader::VertexShaderEntryPoint;
use pipeline::vertex::SingleBufferDefinition;
use pipeline::vertex::VertexDefinition;
use pipeline::viewport::Scissor;
use pipeline::viewport::Viewport;
use pipeline::viewport::ViewportsState;
use std::sync::Arc;
/// Prototype for a `GraphicsPipeline`.
// TODO: we can optimize this by filling directly the raw vk structs
pub struct GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp> {
pub struct GraphicsPipelineBuilder<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp>
{
vertex_input: Vdef,
vertex_shader: Option<VertexShaderEntryPoint<'a, Vsp, Vi, Vo, Vl>>,
input_assembly: InputAssembly,
@ -67,18 +89,34 @@ pub struct GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl
render_pass: Option<Subpass<Rp>>,
}
impl<'a> GraphicsPipelineBuilder<'a, SingleBufferDefinition<()>, (), (), (), (), (),
EmptyShaderInterfaceDef, EmptyShaderInterfaceDef,
EmptyPipelineDesc, (), EmptyShaderInterfaceDef,
EmptyShaderInterfaceDef, EmptyPipelineDesc, (),
EmptyShaderInterfaceDef, EmptyShaderInterfaceDef,
EmptyPipelineDesc, (), EmptyShaderInterfaceDef,
EmptyShaderInterfaceDef, EmptyPipelineDesc, ()>
{
impl<'a>
GraphicsPipelineBuilder<'a,
SingleBufferDefinition<()>,
(),
(),
(),
(),
(),
EmptyShaderInterfaceDef,
EmptyShaderInterfaceDef,
EmptyPipelineDesc,
(),
EmptyShaderInterfaceDef,
EmptyShaderInterfaceDef,
EmptyPipelineDesc,
(),
EmptyShaderInterfaceDef,
EmptyShaderInterfaceDef,
EmptyPipelineDesc,
(),
EmptyShaderInterfaceDef,
EmptyShaderInterfaceDef,
EmptyPipelineDesc,
()> {
/// Builds a new empty builder.
pub(super) fn new() -> Self {
GraphicsPipelineBuilder {
vertex_input: SingleBufferDefinition::new(), // TODO: should be empty attrs instead
vertex_input: SingleBufferDefinition::new(), // TODO: should be empty attrs instead
vertex_shader: None,
input_assembly: InputAssembly::triangle_list(),
tessellation: None,
@ -94,16 +132,58 @@ impl<'a> GraphicsPipelineBuilder<'a, SingleBufferDefinition<()>, (), (), (), (),
}
}
impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi,
Fo, Fl, Rp>
GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
impl<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp>
GraphicsPipelineBuilder<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp>
where Vdef: VertexDefinition<Vi>,
Vl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Fl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Tcl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Tel: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Gl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Vl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Fl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Tcl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Tel: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Gl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
Tci: ShaderInterfaceDefMatch<Vo>,
Tei: ShaderInterfaceDefMatch<Tco>,
Gi: ShaderInterfaceDefMatch<Teo> + ShaderInterfaceDefMatch<Vo>,
@ -111,46 +191,128 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
Tco: ShaderInterfaceDef,
Teo: ShaderInterfaceDef,
Go: ShaderInterfaceDef,
Fi: ShaderInterfaceDefMatch<Go> + ShaderInterfaceDefMatch<Teo> + ShaderInterfaceDefMatch<Vo>,
Fi: ShaderInterfaceDefMatch<Go>
+ ShaderInterfaceDefMatch<Teo>
+ ShaderInterfaceDefMatch<Vo>,
Fo: ShaderInterfaceDef,
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>,
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>
{
/// Builds the graphics pipeline.
// TODO: replace Box<PipelineLayoutAbstract> with a PipelineUnion struct without template params
pub fn build(self, device: Arc<Device>) -> Result<GraphicsPipeline<Vdef, Box<PipelineLayoutAbstract + Send + Sync>, Rp>, GraphicsPipelineCreationError> {
pub fn build(self, device: Arc<Device>)
-> Result<GraphicsPipeline<Vdef, Box<PipelineLayoutAbstract + Send + Sync>, Rp>,
GraphicsPipelineCreationError> {
// TODO: return errors instead of panicking if missing param
GraphicsPipeline::with_tessellation_and_geometry(device, GraphicsPipelineParams {
vertex_input: self.vertex_input,
vertex_shader: self.vertex_shader.expect("Vertex shader not specified in the builder"),
input_assembly: self.input_assembly,
tessellation: self.tessellation,
geometry_shader: self.geometry_shader,
viewport: self.viewport.expect("Viewport state not specified in the builder"),
raster: self.raster,
multisample: self.multisample,
fragment_shader: self.fragment_shader.expect("Fragment shader not specified in the builder"),
depth_stencil: self.depth_stencil,
blend: self.blend,
render_pass: self.render_pass.expect("Render pass not specified in the builder"),
})
GraphicsPipeline::with_tessellation_and_geometry(device,
GraphicsPipelineParams {
vertex_input: self.vertex_input,
vertex_shader:
self.vertex_shader
.expect("Vertex shader not \
specified in the \
builder"),
input_assembly: self.input_assembly,
tessellation: self.tessellation,
geometry_shader: self.geometry_shader,
viewport:
self.viewport
.expect("Viewport state not \
specified in the \
builder"),
raster: self.raster,
multisample: self.multisample,
fragment_shader:
self.fragment_shader
.expect("Fragment shader not \
specified in the \
builder"),
depth_stencil: self.depth_stencil,
blend: self.blend,
render_pass:
self.render_pass
.expect("Render pass not \
specified in the \
builder"),
})
}
// TODO: add build_with_cache method
}
impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi,
Fo, Fl, Rp>
GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
{
impl<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp>
GraphicsPipelineBuilder<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp> {
// TODO: add pipeline derivate system
/// Sets the vertex input.
#[inline]
pub fn vertex_input<T>(self, vertex_input: T)
-> GraphicsPipelineBuilder<'a, T, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
{
-> GraphicsPipelineBuilder<'a,
T,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp> {
GraphicsPipelineBuilder {
vertex_input: vertex_input,
vertex_shader: self.vertex_shader,
@ -173,20 +335,61 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
/// vertex.
#[inline]
pub fn vertex_input_single_buffer<V>(self)
-> GraphicsPipelineBuilder<'a, SingleBufferDefinition<V>, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
{
-> GraphicsPipelineBuilder<'a,
SingleBufferDefinition<V>,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp> {
self.vertex_input(SingleBufferDefinition::<V>::new())
}
/// Sets the vertex shader to use.
// TODO: correct specialization constants
#[inline]
pub fn vertex_shader<Vi2, Vo2, Vl2>(self, shader: VertexShaderEntryPoint<'a, (), Vi2, Vo2, Vl2>,
pub fn vertex_shader<Vi2, Vo2, Vl2>(self,
shader: VertexShaderEntryPoint<'a, (), Vi2, Vo2, Vl2>,
specialization_constants: ())
-> GraphicsPipelineBuilder<'a, Vdef, (), Vi2, Vo2, Vl2, Tcs, Tci, Tco,
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
{
-> GraphicsPipelineBuilder<'a,
Vdef,
(),
Vi2,
Vo2,
Vl2,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp> {
GraphicsPipelineBuilder {
vertex_input: self.vertex_input,
vertex_shader: Some(shader),
@ -225,7 +428,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn point_list(self) -> Self {
self.primitive_topology(PrimitiveTopology::PointList)
}
/// Sets the topology of the primitives to a list of lines.
///
/// > **Note**: This is equivalent to
@ -234,7 +437,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn line_list(self) -> Self {
self.primitive_topology(PrimitiveTopology::LineList)
}
/// Sets the topology of the primitives to a line strip.
///
/// > **Note**: This is equivalent to
@ -243,7 +446,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn line_strip(self) -> Self {
self.primitive_topology(PrimitiveTopology::LineStrip)
}
/// Sets the topology of the primitives to a list of triangles. Note that this is the default.
///
/// > **Note**: This is equivalent to
@ -252,7 +455,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn triangle_list(self) -> Self {
self.primitive_topology(PrimitiveTopology::TriangleList)
}
/// Sets the topology of the primitives to a triangle strip.
///
/// > **Note**: This is equivalent to
@ -261,7 +464,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn triangle_strip(self) -> Self {
self.primitive_topology(PrimitiveTopology::TriangleStrip)
}
/// Sets the topology of the primitives to a fan of triangles.
///
/// > **Note**: This is equivalent to
@ -270,7 +473,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn triangle_fan(self) -> Self {
self.primitive_topology(PrimitiveTopology::TriangleFan)
}
/// Sets the topology of the primitives to a list of lines with adjacency information.
///
/// > **Note**: This is equivalent to
@ -279,7 +482,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn line_list_with_adjacency(self) -> Self {
self.primitive_topology(PrimitiveTopology::LineListWithAdjacency)
}
/// Sets the topology of the primitives to a line strip with adjacency information.
///
/// > **Note**: This is equivalent to
@ -288,7 +491,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn line_strip_with_adjacency(self) -> Self {
self.primitive_topology(PrimitiveTopology::LineStripWithAdjacency)
}
/// Sets the topology of the primitives to a list of triangles with adjacency information.
///
/// > **Note**: This is equivalent to
@ -297,7 +500,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn triangle_list_with_adjacency(self) -> Self {
self.primitive_topology(PrimitiveTopology::TriangleListWithAdjacency)
}
/// Sets the topology of the primitives to a triangle strip with adjacency information`
///
/// > **Note**: This is equivalent to
@ -306,7 +509,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn triangle_strip_with_adjacency(self) -> Self {
self.primitive_topology(PrimitiveTopology::TriangleStripWithAdjacency)
}
/// Sets the topology of the primitives to a list of patches. Can only be used and must be used
/// with a tessellation shader.
///
@ -333,9 +536,9 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
vertex_shader: self.vertex_shader,
input_assembly: self.input_assembly,
tessellation: Some(GraphicsPipelineParamsTess {
tessellation_control_shader: tessellation_control_shader,
tessellation_evaluation_shader: tessellation_evaluation_shader,
}),
tessellation_control_shader: tessellation_control_shader,
tessellation_evaluation_shader: tessellation_evaluation_shader,
}),
geometry_shader: self.geometry_shader,
viewport: self.viewport,
raster: self.raster,
@ -357,11 +560,32 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
/// Sets the geometry shader to use.
// TODO: correct specialization constants
#[inline]
pub fn geometry_shader<Gi2, Go2, Gl2>(self, shader: GeometryShaderEntryPoint<'a, (), Gi2, Go2, Gl2>,
pub fn geometry_shader<Gi2, Go2, Gl2>(self,
shader: GeometryShaderEntryPoint<'a, (), Gi2, Go2, Gl2>,
specialization_constants: ())
-> GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
Tcl, Tes, Tei, Teo, Tel, (), Gi2, Go2, Gl2, Fs, Fi, Fo, Fl, Rp>
{
-> GraphicsPipelineBuilder<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
(),
Gi2,
Go2,
Gl2,
Fs,
Fi,
Fo,
Fl,
Rp> {
GraphicsPipelineBuilder {
vertex_input: self.vertex_input,
vertex_shader: self.vertex_shader,
@ -409,9 +633,8 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn viewports_dynamic_scissors_fixed<I>(mut self, scissors: I) -> Self
where I: IntoIterator<Item = Scissor>
{
self.viewport = Some(ViewportsState::DynamicViewports {
scissors: scissors.into_iter().collect()
});
self.viewport =
Some(ViewportsState::DynamicViewports { scissors: scissors.into_iter().collect() });
self
}
@ -420,8 +643,8 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
#[inline]
pub fn viewports_dynamic_scissors_irrelevant(mut self, num: u32) -> Self {
self.viewport = Some(ViewportsState::DynamicViewports {
scissors: (0 .. num).map(|_| Scissor::irrelevant()).collect()
});
scissors: (0 .. num).map(|_| Scissor::irrelevant()).collect(),
});
self
}
@ -431,9 +654,8 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
pub fn viewports_fixed_scissors_dynamic<I>(mut self, viewports: I) -> Self
where I: IntoIterator<Item = Viewport>
{
self.viewport = Some(ViewportsState::DynamicScissors {
viewports: viewports.into_iter().collect()
});
self.viewport =
Some(ViewportsState::DynamicScissors { viewports: viewports.into_iter().collect() });
self
}
@ -441,9 +663,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
/// drawing.
#[inline]
pub fn viewports_scissors_dynamic(mut self, num: u32) -> Self {
self.viewport = Some(ViewportsState::Dynamic {
num: num
});
self.viewport = Some(ViewportsState::Dynamic { num: num });
self
}
@ -562,11 +782,32 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
/// The fragment shader is run once for each pixel that is covered by each primitive.
// TODO: correct specialization constants
#[inline]
pub fn fragment_shader<Fi2, Fo2, Fl2>(self, shader: FragmentShaderEntryPoint<'a, (), Fi2, Fo2, Fl2>,
pub fn fragment_shader<Fi2, Fo2, Fl2>(self,
shader: FragmentShaderEntryPoint<'a, (), Fi2, Fo2, Fl2>,
specialization_constants: ())
-> GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, (), Fi2, Fo2, Fl2, Rp>
{
-> GraphicsPipelineBuilder<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
(),
Fi2,
Fo2,
Fl2,
Rp> {
GraphicsPipelineBuilder {
vertex_input: self.vertex_input,
vertex_shader: self.vertex_shader,
@ -680,9 +921,29 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
/// Sets the render pass subpass to use.
#[inline]
pub fn render_pass<Rp2>(self, subpass: Subpass<Rp2>)
-> GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp2>
{
-> GraphicsPipelineBuilder<'a,
Vdef,
Vsp,
Vi,
Vo,
Vl,
Tcs,
Tci,
Tco,
Tcl,
Tes,
Tei,
Teo,
Tel,
Gs,
Gi,
Go,
Gl,
Fs,
Fi,
Fo,
Fl,
Rp2> {
GraphicsPipelineBuilder {
vertex_input: self.vertex_input,
vertex_shader: self.vertex_shader,

File diff suppressed because it is too large Load Diff

View File

@ -11,6 +11,7 @@
//!
//! The input assembly is the stage where lists of vertices are turned into primitives.
//!
use vk;
/// How the input assembly stage should behave.
@ -67,10 +68,14 @@ impl Into<vk::PrimitiveTopology> for PrimitiveTopology {
PrimitiveTopology::TriangleList => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
PrimitiveTopology::TriangleStrip => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
PrimitiveTopology::TriangleFan => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
PrimitiveTopology::LineListWithAdjacency => vk::PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
PrimitiveTopology::LineStripWithAdjacency => vk::PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
PrimitiveTopology::TriangleListWithAdjacency => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
PrimitiveTopology::TriangleStripWithAdjacency => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
PrimitiveTopology::LineListWithAdjacency =>
vk::PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
PrimitiveTopology::LineStripWithAdjacency =>
vk::PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
PrimitiveTopology::TriangleListWithAdjacency =>
vk::PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
PrimitiveTopology::TriangleStripWithAdjacency =>
vk::PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
PrimitiveTopology::PatchList { .. } => vk::PRIMITIVE_TOPOLOGY_PATCH_LIST,
}
}
@ -86,7 +91,7 @@ impl PrimitiveTopology {
PrimitiveTopology::TriangleFan => true,
PrimitiveTopology::LineStripWithAdjacency => true,
PrimitiveTopology::TriangleStripWithAdjacency => true,
_ => false
_ => false,
}
}
}

View File

@ -25,7 +25,7 @@
//! write pixels to a framebuffer).
//!
//! # Creating a compute pipeline.
//!
//!
//! In order to create a compute pipeline, you first need a *shader entry point*.
//!
//! TODO: write the rest

View File

@ -8,10 +8,11 @@
// according to those terms.
//! Stage when triangles are turned into pixels.
//!
//!
//! The rasterization is the stage when collections of triangles are turned into collections
//! of pixels or samples.
//!
use vk;
/// State of the rasterizer.
@ -71,7 +72,7 @@ impl DepthBiasControl {
pub fn is_dynamic(&self) -> bool {
match *self {
DepthBiasControl::Dynamic => true,
_ => false
_ => false,
}
}
}

View File

@ -8,17 +8,18 @@
// according to those terms.
//! Stage of a graphics pipeline.
//!
//!
//! In Vulkan, shaders are grouped in *shader modules*. Each shader module is built from SPIR-V
//! code and can contain one or more entry points. Note that for the moment the official
//! GLSL-to-SPIR-V compiler does not support multiple entry points.
//!
//!
//! The vulkano library does not provide any functionnality that checks and introspects the SPIR-V
//! code, therefore the whole shader-related API is unsafe. You are encouraged to use the
//! `vulkano-shaders` crate that will generate Rust code that wraps around vulkano's shaders API.
use std::borrow::Cow;
use std::error;
use std::ffi::CStr;
use std::fmt;
use std::iter;
use std::iter::Empty as EmptyIter;
@ -27,16 +28,15 @@ use std::mem;
use std::ops::Range;
use std::ptr;
use std::sync::Arc;
use std::ffi::CStr;
use format::Format;
use pipeline::input_assembly::PrimitiveTopology;
use device::Device;
use OomError;
use VulkanObject;
use SafeDeref;
use VulkanObject;
use check_errors;
use device::Device;
use vk;
/// Contains SPIR-V code with one or more entry points.
@ -44,14 +44,18 @@ use vk;
/// Note that it is advised to wrap around a `ShaderModule` with a struct that is different for
/// each shader.
#[derive(Debug)]
pub struct ShaderModule<P = Arc<Device>> where P: SafeDeref<Target = Device> {
pub struct ShaderModule<P = Arc<Device>>
where P: SafeDeref<Target = Device>
{
// The module.
module: vk::ShaderModule,
// Pointer to the device.
device: P,
}
impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
impl<P> ShaderModule<P>
where P: SafeDeref<Target = Device>
{
/// Builds a new shader module from SPIR-V.
///
/// # Safety
@ -67,22 +71,24 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
let infos = vk::ShaderModuleCreateInfo {
sType: vk::STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
codeSize: spirv.len(),
pCode: spirv.as_ptr() as *const _,
};
let vk = device.pointers();
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateShaderModule(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateShaderModule(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(Arc::new(ShaderModule {
module: module,
device: device,
}))
module: module,
device: device,
}))
}
/// Gets access to an entry point contained in this module.
@ -97,10 +103,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
/// - The input, output and layout must correctly describe the input, output and layout used
/// by this stage.
///
pub unsafe fn vertex_shader_entry_point<'a, S, I, O, L>
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> VertexShaderEntryPoint<'a, S, I, O, L, P>
{
pub unsafe fn vertex_shader_entry_point<'a, S, I, O, L>(
&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> VertexShaderEntryPoint<'a, S, I, O, L, P> {
VertexShaderEntryPoint {
module: self,
name: name,
@ -123,10 +128,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
/// - The input, output and layout must correctly describe the input, output and layout used
/// by this stage.
///
pub unsafe fn tess_control_shader_entry_point<'a, S, I, O, L>
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> TessControlShaderEntryPoint<'a, S, I, O, L, P>
{
pub unsafe fn tess_control_shader_entry_point<'a, S, I, O, L>(
&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> TessControlShaderEntryPoint<'a, S, I, O, L, P> {
TessControlShaderEntryPoint {
module: self,
name: name,
@ -149,10 +153,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
/// - The input, output and layout must correctly describe the input, output and layout used
/// by this stage.
///
pub unsafe fn tess_evaluation_shader_entry_point<'a, S, I, O, L>
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P>
{
pub unsafe fn tess_evaluation_shader_entry_point<'a, S, I, O, L>(
&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P> {
TessEvaluationShaderEntryPoint {
module: self,
name: name,
@ -175,10 +178,10 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
/// - The input, output and layout must correctly describe the input, output and layout used
/// by this stage.
///
pub unsafe fn geometry_shader_entry_point<'a, S, I, O, L>
(&'a self, name: &'a CStr, primitives: GeometryShaderExecutionMode, input: I,
output: O, layout: L) -> GeometryShaderEntryPoint<'a, S, I, O, L, P>
{
pub unsafe fn geometry_shader_entry_point<'a, S, I, O, L>(
&'a self, name: &'a CStr, primitives: GeometryShaderExecutionMode, input: I, output: O,
layout: L)
-> GeometryShaderEntryPoint<'a, S, I, O, L, P> {
GeometryShaderEntryPoint {
module: self,
name: name,
@ -202,10 +205,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
/// - The input, output and layout must correctly describe the input, output and layout used
/// by this stage.
///
pub unsafe fn fragment_shader_entry_point<'a, S, I, O, L>
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> FragmentShaderEntryPoint<'a, S, I, O, L, P>
{
pub unsafe fn fragment_shader_entry_point<'a, S, I, O, L>(
&'a self, name: &'a CStr, input: I, output: O, layout: L)
-> FragmentShaderEntryPoint<'a, S, I, O, L, P> {
FragmentShaderEntryPoint {
module: self,
name: name,
@ -229,8 +231,7 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
///
#[inline]
pub unsafe fn compute_shader_entry_point<'a, S, L>(&'a self, name: &'a CStr, layout: L)
-> ComputeShaderEntryPoint<'a, S, L, P>
{
-> ComputeShaderEntryPoint<'a, S, L, P> {
ComputeShaderEntryPoint {
module: self,
name: name,
@ -240,7 +241,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
}
}
unsafe impl<P> VulkanObject for ShaderModule<P> where P: SafeDeref<Target = Device> {
unsafe impl<P> VulkanObject for ShaderModule<P>
where P: SafeDeref<Target = Device>
{
type Object = vk::ShaderModule;
#[inline]
@ -249,7 +252,9 @@ unsafe impl<P> VulkanObject for ShaderModule<P> where P: SafeDeref<Target = Devi
}
}
impl<P> Drop for ShaderModule<P> where P: SafeDeref<Target = Device> {
impl<P> Drop for ShaderModule<P>
where P: SafeDeref<Target = Device>
{
#[inline]
fn drop(&mut self) {
unsafe {
@ -263,7 +268,7 @@ impl<P> Drop for ShaderModule<P> where P: SafeDeref<Target = Device> {
///
/// Can be obtained by calling `vertex_shader_entry_point()` on the shader module.
#[derive(Debug, Copy, Clone)]
pub struct VertexShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
pub struct VertexShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
where P: 'a + SafeDeref<Target = Device>
{
module: &'a ShaderModule<P>,
@ -313,7 +318,7 @@ impl<'a, S, I, O, L, P> VertexShaderEntryPoint<'a, S, I, O, L, P>
///
/// Can be obtained by calling `tess_control_shader_entry_point()` on the shader module.
#[derive(Debug, Copy, Clone)]
pub struct TessControlShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
pub struct TessControlShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
where P: 'a + SafeDeref<Target = Device>
{
module: &'a ShaderModule<P>,
@ -324,7 +329,7 @@ pub struct TessControlShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
marker: PhantomData<S>,
}
impl<'a, S, I, O, L, P> TessControlShaderEntryPoint<'a, S, I, O, L, P>
impl<'a, S, I, O, L, P> TessControlShaderEntryPoint<'a, S, I, O, L, P>
where P: 'a + SafeDeref<Target = Device>
{
/// Returns the module this entry point comes from.
@ -362,7 +367,7 @@ impl<'a, S, I, O, L, P> TessControlShaderEntryPoint<'a, S, I, O, L, P>
///
/// Can be obtained by calling `tess_evaluation_shader_entry_point()` on the shader module.
#[derive(Debug, Copy, Clone)]
pub struct TessEvaluationShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
pub struct TessEvaluationShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
where P: 'a + SafeDeref<Target = Device>
{
module: &'a ShaderModule<P>,
@ -373,7 +378,7 @@ pub struct TessEvaluationShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
marker: PhantomData<S>,
}
impl<'a, S, I, O, L, P> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P>
impl<'a, S, I, O, L, P> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P>
where P: 'a + SafeDeref<Target = Device>
{
/// Returns the module this entry point comes from.
@ -411,7 +416,7 @@ impl<'a, S, I, O, L, P> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P>
///
/// Can be obtained by calling `geometry_shader_entry_point()` on the shader module.
#[derive(Debug, Copy, Clone)]
pub struct GeometryShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
pub struct GeometryShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
where P: 'a + SafeDeref<Target = Device>
{
module: &'a ShaderModule<P>,
@ -423,7 +428,7 @@ pub struct GeometryShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
marker: PhantomData<S>,
}
impl<'a, S, I, O, L, P> GeometryShaderEntryPoint<'a, S, I, O, L, P>
impl<'a, S, I, O, L, P> GeometryShaderEntryPoint<'a, S, I, O, L, P>
where P: 'a + SafeDeref<Target = Device>
{
/// Returns the module this entry point comes from.
@ -502,7 +507,7 @@ impl GeometryShaderExecutionMode {
///
/// Can be obtained by calling `fragment_shader_entry_point()` on the shader module.
#[derive(Debug, Copy, Clone)]
pub struct FragmentShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
pub struct FragmentShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
where P: 'a + SafeDeref<Target = Device>
{
module: &'a ShaderModule<P>,
@ -513,7 +518,7 @@ pub struct FragmentShaderEntryPoint<'a, S, I, O, L, P = Arc<Device>>
marker: PhantomData<S>,
}
impl<'a, S, I, O, L, P> FragmentShaderEntryPoint<'a, S, I, O, L, P>
impl<'a, S, I, O, L, P> FragmentShaderEntryPoint<'a, S, I, O, L, P>
where P: 'a + SafeDeref<Target = Device>
{
/// Returns the module this entry point comes from.
@ -551,7 +556,7 @@ impl<'a, S, I, O, L, P> FragmentShaderEntryPoint<'a, S, I, O, L, P>
///
/// Can be obtained by calling `compute_shader_entry_point()` on the shader module.
#[derive(Debug, Copy, Clone)]
pub struct ComputeShaderEntryPoint<'a, S, L, P = Arc<Device>>
pub struct ComputeShaderEntryPoint<'a, S, L, P = Arc<Device>>
where P: 'a + SafeDeref<Target = Device>
{
module: &'a ShaderModule<P>,
@ -560,7 +565,7 @@ pub struct ComputeShaderEntryPoint<'a, S, L, P = Arc<Device>>
marker: PhantomData<S>,
}
impl<'a, S, L, P> ComputeShaderEntryPoint<'a, S, L, P>
impl<'a, S, L, P> ComputeShaderEntryPoint<'a, S, L, P>
where P: 'a + SafeDeref<Target = Device>
{
/// Returns the module this entry point comes from.
@ -624,14 +629,17 @@ unsafe impl ShaderInterfaceDef for EmptyShaderInterfaceDef {
/// Extension trait for `ShaderInterfaceDef` that specifies that the interface is potentially
/// compatible with another one.
pub unsafe trait ShaderInterfaceDefMatch<I>: ShaderInterfaceDef where I: ShaderInterfaceDef {
pub unsafe trait ShaderInterfaceDefMatch<I>: ShaderInterfaceDef
where I: ShaderInterfaceDef
{
/// Returns `Ok` if the two definitions match.
fn matches(&self, other: &I) -> Result<(), ShaderInterfaceMismatchError>;
}
// TODO: turn this into a default impl that can be specialized
unsafe impl<T, I> ShaderInterfaceDefMatch<I> for T
where T: ShaderInterfaceDef, I: ShaderInterfaceDef
where T: ShaderInterfaceDef,
I: ShaderInterfaceDef
{
fn matches(&self, other: &I) -> Result<(), ShaderInterfaceMismatchError> {
if self.elements().len() != other.elements().len() {
@ -640,8 +648,12 @@ unsafe impl<T, I> ShaderInterfaceDefMatch<I> for T
for a in self.elements() {
for loc in a.location.clone() {
let b = match other.elements().find(|e| loc >= e.location.start && loc < e.location.end) {
None => return Err(ShaderInterfaceMismatchError::MissingElement { location: loc }),
let b = match other
.elements()
.find(|e| loc >= e.location.start && loc < e.location.end) {
None => return Err(ShaderInterfaceMismatchError::MissingElement {
location: loc,
}),
Some(b) => b,
};
@ -674,11 +686,11 @@ impl error::Error for ShaderInterfaceMismatchError {
#[inline]
fn description(&self) -> &str {
match *self {
ShaderInterfaceMismatchError::ElementsCountMismatch => "the number of elements \
mismatches",
ShaderInterfaceMismatchError::ElementsCountMismatch =>
"the number of elements mismatches",
ShaderInterfaceMismatchError::MissingElement { .. } => "an element is missing",
ShaderInterfaceMismatchError::FormatMismatch => "the format of an element does not \
match",
ShaderInterfaceMismatchError::FormatMismatch =>
"the format of an element does not match",
}
}
}

View File

@ -11,14 +11,15 @@ use std::error;
use std::fmt;
use std::sync::Arc;
use SafeDeref;
use buffer::BufferAccess;
use format::Format;
use pipeline::vertex::VertexMemberTy;
use SafeDeref;
use vk;
/// Trait for types that describe the definition of the vertex input used by a graphics pipeline.
pub unsafe trait VertexDefinition<I>: VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> {
pub unsafe trait VertexDefinition<I>
: VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> {
/// Iterator that returns the offset, the stride (in bytes) and input rate of each buffer.
type BuffersIter: ExactSizeIterator<Item = (u32, usize, InputRate)>;
/// Iterator that returns the attribute location, buffer id, and infos.
@ -26,18 +27,22 @@ pub unsafe trait VertexDefinition<I>: VertexSource<Vec<Arc<BufferAccess + Send +
/// Builds the vertex definition to use to link this definition to a vertex shader's input
/// interface.
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
IncompatibleVertexDefinitionError>;
fn definition(
&self, interface: &I)
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError>;
}
unsafe impl<I, T> VertexDefinition<I> for T where T: SafeDeref, T::Target: VertexDefinition<I> {
unsafe impl<I, T> VertexDefinition<I> for T
where T: SafeDeref,
T::Target: VertexDefinition<I>
{
type BuffersIter = <T::Target as VertexDefinition<I>>::BuffersIter;
type AttribsIter = <T::Target as VertexDefinition<I>>::AttribsIter;
#[inline]
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
IncompatibleVertexDefinitionError>
{
fn definition(
&self, interface: &I)
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
(**self).definition(interface)
}
}
@ -111,7 +116,10 @@ pub unsafe trait VertexSource<L> {
fn decode(&self, L) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize);
}
unsafe impl<L, T> VertexSource<L> for T where T: SafeDeref, T::Target: VertexSource<L> {
unsafe impl<L, T> VertexSource<L> for T
where T: SafeDeref,
T::Target: VertexSource<L>
{
#[inline]
fn decode(&self, list: L) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
(**self).decode(list)

View File

@ -44,21 +44,21 @@
//! use vulkano::pipeline::vertex::;
//! # let device: Arc<Device> = return;
//! # let queue: Arc<Queue> = return;
//!
//!
//! struct Vertex {
//! position: [f32; 2]
//! }
//!
//!
//! impl_vertex!(Vertex, position);
//!
//!
//! let usage = BufferUsage {
//! vertex_buffer: true,
//! .. BufferUsage::none()
//! };
//!
//!
//! let vertex_buffer = BufferAccess::<[Vertex], _>::array(&device, 128, &usage, HostVisible, &queue)
//! .expect("failed to create buffer");
//!
//!
//! // TODO: finish example
//! # }
//! ```

View File

@ -28,18 +28,22 @@ pub struct OneVertexOneInstanceDefinition<T, U>(pub PhantomData<(T, U)>);
impl<T, U> OneVertexOneInstanceDefinition<T, U> {
#[inline]
pub fn new() -> OneVertexOneInstanceDefinition<T, U> { OneVertexOneInstanceDefinition(PhantomData) }
pub fn new() -> OneVertexOneInstanceDefinition<T, U> {
OneVertexOneInstanceDefinition(PhantomData)
}
}
unsafe impl<T, U, I> VertexDefinition<I> for OneVertexOneInstanceDefinition<T, U>
where T: Vertex, U: Vertex, I: ShaderInterfaceDef
where T: Vertex,
U: Vertex,
I: ShaderInterfaceDef
{
type BuffersIter = VecIntoIter<(u32, usize, InputRate)>;
type AttribsIter = VecIntoIter<(u32, u32, AttributeInfo)>;
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
IncompatibleVertexDefinitionError>
{
fn definition(
&self, interface: &I)
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
let attrib = {
let mut attribs = Vec::with_capacity(interface.elements().len());
for e in interface.elements() {
@ -51,43 +55,52 @@ unsafe impl<T, U, I> VertexDefinition<I> for OneVertexOneInstanceDefinition<T, U
(infos, 1)
} else {
return Err(IncompatibleVertexDefinitionError::MissingAttribute {
attribute: name.clone().into_owned()
});
attribute: name.clone().into_owned(),
});
};
if !infos.ty.matches(infos.array_size, e.format,
if !infos.ty.matches(infos.array_size,
e.format,
e.location.end - e.location.start)
{
return Err(IncompatibleVertexDefinitionError::FormatMismatch {
attribute: name.clone().into_owned(),
shader: (e.format, (e.location.end - e.location.start) as usize),
definition: (infos.ty, infos.array_size),
})
attribute: name.clone().into_owned(),
shader: (e.format, (e.location.end - e.location.start) as usize),
definition: (infos.ty, infos.array_size),
});
}
let mut offset = infos.offset;
for loc in e.location.clone() {
attribs.push((loc, buf_offset, AttributeInfo { offset: offset, format: e.format }));
attribs.push((loc,
buf_offset,
AttributeInfo {
offset: offset,
format: e.format,
}));
offset += e.format.size().unwrap();
}
}
attribs
}.into_iter(); // TODO: meh
}.into_iter(); // TODO: meh
let buffers = vec![
(0, mem::size_of::<T>(), InputRate::Vertex),
(1, mem::size_of::<U>(), InputRate::Instance)
(1, mem::size_of::<U>(), InputRate::Instance),
].into_iter();
Ok((buffers, attrib))
}
}
unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for OneVertexOneInstanceDefinition<T, U>
where T: Vertex, U: Vertex
unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>>
for OneVertexOneInstanceDefinition<T, U>
where T: Vertex,
U: Vertex
{
#[inline]
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>)
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
// FIXME: safety
assert_eq!(source.len(), 2);
let len = source[0].size() / mem::size_of::<T>();
@ -99,8 +112,10 @@ unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for OneVert
}
unsafe impl<'a, T, U, Bt, Bu> VertexSource<(Bt, Bu)> for OneVertexOneInstanceDefinition<T, U>
where T: Vertex, Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
U: Vertex, Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
where T: Vertex,
Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
U: Vertex,
Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
{
#[inline]
fn decode(&self, source: (Bt, Bu)) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {

View File

@ -28,18 +28,21 @@ pub struct SingleBufferDefinition<T>(pub PhantomData<T>);
impl<T> SingleBufferDefinition<T> {
#[inline]
pub fn new() -> SingleBufferDefinition<T> { SingleBufferDefinition(PhantomData) }
pub fn new() -> SingleBufferDefinition<T> {
SingleBufferDefinition(PhantomData)
}
}
unsafe impl<T, I> VertexDefinition<I> for SingleBufferDefinition<T>
where T: Vertex, I: ShaderInterfaceDef
where T: Vertex,
I: ShaderInterfaceDef
{
type BuffersIter = OptionIntoIter<(u32, usize, InputRate)>;
type AttribsIter = VecIntoIter<(u32, u32, AttributeInfo)>;
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
IncompatibleVertexDefinitionError>
{
fn definition(
&self, interface: &I)
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
let attrib = {
let mut attribs = Vec::with_capacity(interface.elements().len());
for e in interface.elements() {
@ -48,28 +51,34 @@ unsafe impl<T, I> VertexDefinition<I> for SingleBufferDefinition<T>
let infos = match <T as Vertex>::member(name) {
Some(m) => m,
None => return Err(IncompatibleVertexDefinitionError::MissingAttribute {
attribute: name.clone().into_owned()
})
attribute: name.clone().into_owned(),
}),
};
if !infos.ty.matches(infos.array_size, e.format,
if !infos.ty.matches(infos.array_size,
e.format,
e.location.end - e.location.start)
{
return Err(IncompatibleVertexDefinitionError::FormatMismatch {
attribute: name.clone().into_owned(),
shader: (e.format, (e.location.end - e.location.start) as usize),
definition: (infos.ty, infos.array_size),
})
attribute: name.clone().into_owned(),
shader: (e.format, (e.location.end - e.location.start) as usize),
definition: (infos.ty, infos.array_size),
});
}
let mut offset = infos.offset;
for loc in e.location.clone() {
attribs.push((loc, 0, AttributeInfo { offset: offset, format: e.format }));
attribs.push((loc,
0,
AttributeInfo {
offset: offset,
format: e.format,
}));
offset += e.format.size().unwrap();
}
}
attribs
}.into_iter(); // TODO: meh
}.into_iter(); // TODO: meh
let buffers = Some((0, mem::size_of::<T>(), InputRate::Vertex)).into_iter();
Ok((buffers, attrib))
@ -80,7 +89,8 @@ unsafe impl<V> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for SingleBuff
where V: Vertex
{
#[inline]
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>)
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
// FIXME: safety
assert_eq!(source.len(), 1);
let len = source[0].size() / mem::size_of::<V>();
@ -89,7 +99,8 @@ unsafe impl<V> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for SingleBuff
}
unsafe impl<'a, B, V> VertexSource<B> for SingleBufferDefinition<V>
where B: TypedBufferAccess<Content = [V]> + Send + Sync + 'static, V: Vertex
where B: TypedBufferAccess<Content = [V]> + Send + Sync + 'static,
V: Vertex
{
#[inline]
fn decode(&self, source: B) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {

View File

@ -28,18 +28,22 @@ pub struct TwoBuffersDefinition<T, U>(pub PhantomData<(T, U)>);
impl<T, U> TwoBuffersDefinition<T, U> {
#[inline]
pub fn new() -> TwoBuffersDefinition<T, U> { TwoBuffersDefinition(PhantomData) }
pub fn new() -> TwoBuffersDefinition<T, U> {
TwoBuffersDefinition(PhantomData)
}
}
unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
where T: Vertex, U: Vertex, I: ShaderInterfaceDef
where T: Vertex,
U: Vertex,
I: ShaderInterfaceDef
{
type BuffersIter = VecIntoIter<(u32, usize, InputRate)>;
type AttribsIter = VecIntoIter<(u32, u32, AttributeInfo)>;
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
IncompatibleVertexDefinitionError>
{
fn definition(
&self, interface: &I)
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
let attrib = {
let mut attribs = Vec::with_capacity(interface.elements().len());
for e in interface.elements() {
@ -51,32 +55,38 @@ unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
(infos, 1)
} else {
return Err(IncompatibleVertexDefinitionError::MissingAttribute {
attribute: name.clone().into_owned()
});
attribute: name.clone().into_owned(),
});
};
if !infos.ty.matches(infos.array_size, e.format,
if !infos.ty.matches(infos.array_size,
e.format,
e.location.end - e.location.start)
{
return Err(IncompatibleVertexDefinitionError::FormatMismatch {
attribute: name.clone().into_owned(),
shader: (e.format, (e.location.end - e.location.start) as usize),
definition: (infos.ty, infos.array_size),
})
attribute: name.clone().into_owned(),
shader: (e.format, (e.location.end - e.location.start) as usize),
definition: (infos.ty, infos.array_size),
});
}
let mut offset = infos.offset;
for loc in e.location.clone() {
attribs.push((loc, buf_offset, AttributeInfo { offset: offset, format: e.format }));
attribs.push((loc,
buf_offset,
AttributeInfo {
offset: offset,
format: e.format,
}));
offset += e.format.size().unwrap();
}
}
attribs
}.into_iter(); // TODO: meh
}.into_iter(); // TODO: meh
let buffers = vec![
(0, mem::size_of::<T>(), InputRate::Vertex),
(1, mem::size_of::<U>(), InputRate::Vertex)
(1, mem::size_of::<U>(), InputRate::Vertex),
].into_iter();
Ok((buffers, attrib))
@ -84,21 +94,29 @@ unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
}
unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for TwoBuffersDefinition<T, U>
where T: Vertex, U: Vertex
where T: Vertex,
U: Vertex
{
#[inline]
fn decode(&self, source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
unimplemented!() // FIXME: implement
fn decode(&self, source: Vec<Arc<BufferAccess + Send + Sync>>)
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
unimplemented!() // FIXME: implement
}
}
unsafe impl<'a, T, U, Bt, Bu> VertexSource<(Bt, Bu)> for TwoBuffersDefinition<T, U>
where T: Vertex, Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
U: Vertex, Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
where T: Vertex,
Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
U: Vertex,
Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
{
#[inline]
fn decode(&self, source: (Bt, Bu)) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
let vertices = [source.0.len(), source.1.len()].iter().cloned().min().unwrap();
let vertices = [source.0.len(), source.1.len()]
.iter()
.cloned()
.min()
.unwrap();
(vec![Box::new(source.0) as Box<_>, Box::new(source.1) as Box<_>], vertices, 1)
}
}

View File

@ -47,6 +47,7 @@
//!
//! In all cases the number of viewports and scissor boxes must be the same.
//!
use std::ops::Range;
use vk;
@ -121,7 +122,7 @@ impl ViewportsState {
/// State of a single viewport.
// FIXME: check that:
// x + width must be less than or equal to viewportBoundsRange[0]
// y + height must be less than or equal to viewportBoundsRange[1]
// y + height must be less than or equal to viewportBoundsRange[1]
#[derive(Debug, Clone)]
pub struct Viewport {
/// Coordinates in pixels of the top-left hand corner of the viewport.
@ -158,7 +159,7 @@ impl Into<vk::Viewport> for Viewport {
/// State of a single scissor box.
// FIXME: add a check:
// Evaluation of (offset.x + extent.width) must not cause a signed integer addition overflow
// Evaluation of (offset.y + extent.height) must not cause a signed integer addition overflow
// Evaluation of (offset.y + extent.height) must not cause a signed integer addition overflow
#[derive(Debug, Copy, Clone)]
pub struct Scissor {
/// Coordinates in pixels of the top-left hand corner of the box.

View File

@ -8,7 +8,7 @@
// according to those terms.
//! This module provides support for query pools.
//!
//!
//! In Vulkan, queries are not created individually. Instead you manipulate **query pools**, which
//! represent a collection of queries. Whenever you use a query, you have to specify both the query
//! pool and the slot id within that query pool.
@ -21,24 +21,27 @@ use std::sync::Arc;
use device::Device;
use check_errors;
use Error;
use OomError;
use SafeDeref;
use VulkanObject;
use check_errors;
use vk;
pub struct UnsafeQueryPool<P = Arc<Device>> where P: SafeDeref<Target = Device> {
pub struct UnsafeQueryPool<P = Arc<Device>>
where P: SafeDeref<Target = Device>
{
pool: vk::QueryPool,
device: P,
num_slots: u32,
}
impl<P> UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
impl<P> UnsafeQueryPool<P>
where P: SafeDeref<Target = Device>
{
/// Builds a new query pool.
pub fn new(device: P, ty: QueryType, num_slots: u32)
-> Result<UnsafeQueryPool<P>, QueryPoolCreationError>
{
-> Result<UnsafeQueryPool<P>, QueryPoolCreationError> {
let (vk_ty, statistics) = match ty {
QueryType::Occlusion => (vk::QUERY_TYPE_OCCLUSION, 0),
QueryType::Timestamp => (vk::QUERY_TYPE_TIMESTAMP, 0),
@ -55,7 +58,7 @@ impl<P> UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
let infos = vk::QueryPoolCreateInfo {
sType: vk::STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
queryType: vk_ty,
queryCount: num_slots,
pipelineStatistics: statistics,
@ -63,16 +66,18 @@ impl<P> UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
let mut output = mem::uninitialized();
let vk = device.pointers();
try!(check_errors(vk.CreateQueryPool(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateQueryPool(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(UnsafeQueryPool {
pool: pool,
device: device,
num_slots: num_slots,
})
pool: pool,
device: device,
num_slots: num_slots,
})
}
/// Returns the number of slots of that query pool.
@ -169,7 +174,9 @@ impl Into<vk::QueryPipelineStatisticFlags> for QueryPipelineStatisticFlags {
}
}
impl<P> Drop for UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
impl<P> Drop for UnsafeQueryPool<P>
where P: SafeDeref<Target = Device>
{
#[inline]
fn drop(&mut self) {
unsafe {
@ -204,7 +211,7 @@ impl error::Error for QueryPoolCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
QueryPoolCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -229,7 +236,7 @@ impl From<Error> for QueryPoolCreationError {
match err {
err @ Error::OutOfHostMemory => QueryPoolCreationError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => QueryPoolCreationError::OomError(OomError::from(err)),
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -240,18 +247,16 @@ pub struct OcclusionQueriesPool {
impl OcclusionQueriesPool {
/// See the docs of new().
pub fn raw(device: Arc<Device>, num_slots: u32)
-> Result<OcclusionQueriesPool, OomError>
{
pub fn raw(device: Arc<Device>, num_slots: u32) -> Result<OcclusionQueriesPool, OomError> {
Ok(OcclusionQueriesPool {
inner: match UnsafeQueryPool::new(device, QueryType::Occlusion, num_slots) {
Ok(q) => q,
Err(QueryPoolCreationError::OomError(err)) => return Err(err),
Err(QueryPoolCreationError::PipelineStatisticsQueryFeatureNotEnabled) => {
unreachable!()
},
}
})
inner: match UnsafeQueryPool::new(device, QueryType::Occlusion, num_slots) {
Ok(q) => q,
Err(QueryPoolCreationError::OomError(err)) => return Err(err),
Err(QueryPoolCreationError::PipelineStatisticsQueryFeatureNotEnabled) => {
unreachable!()
},
},
})
}
/// Builds a new query pool.
@ -261,10 +266,8 @@ impl OcclusionQueriesPool {
/// - Panics if the device or host ran out of memory.
///
#[inline]
pub fn new(device: Arc<Device>, num_slots: u32)
-> Arc<OcclusionQueriesPool>
{
Arc::new(OcclusionQueriesPool::raw(device, num_slots).unwrap())
pub fn new(device: Arc<Device>, num_slots: u32) -> Arc<OcclusionQueriesPool> {
Arc::new(OcclusionQueriesPool::raw(device, num_slots).unwrap())
}
/// Returns the number of slots of that query pool.
@ -301,7 +304,7 @@ mod tests {
let ty = QueryType::PipelineStatistics(QueryPipelineStatisticFlags::none());
match UnsafeQueryPool::new(device, ty, 256) {
Err(QueryPoolCreationError::PipelineStatisticsQueryFeatureNotEnabled) => (),
_ => panic!()
_ => panic!(),
};
}
}

View File

@ -61,17 +61,18 @@
//! Samplers that don't use `ClampToBorder` are not concerned by these restrictions.
//!
// FIXME: restrictions aren't checked yet
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use device::Device;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use device::Device;
use vk;
pub use pipeline::depth_stencil::Compare;
@ -99,9 +100,18 @@ impl Sampler {
///
#[inline]
pub fn simple_repeat_linear(device: Arc<Device>) -> Arc<Sampler> {
Sampler::new(device, Filter::Linear, Filter::Linear, MipmapMode::Linear,
SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 1_000.0).unwrap()
Sampler::new(device,
Filter::Linear,
Filter::Linear,
MipmapMode::Linear,
SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat,
0.0,
1.0,
0.0,
1_000.0)
.unwrap()
}
/// Shortcut for creating a sampler with linear sampling, that only uses the main level of
@ -115,9 +125,18 @@ impl Sampler {
///
#[inline]
pub fn simple_repeat_linear_no_mipmap(device: Arc<Device>) -> Arc<Sampler> {
Sampler::new(device, Filter::Linear, Filter::Linear, MipmapMode::Nearest,
SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 1.0).unwrap()
Sampler::new(device,
Filter::Linear,
Filter::Linear,
MipmapMode::Nearest,
SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat,
SamplerAddressMode::Repeat,
0.0,
1.0,
0.0,
1.0)
.unwrap()
}
/// Creates a new `Sampler` with the given behavior.
@ -150,10 +169,19 @@ impl Sampler {
mipmap_mode: MipmapMode, address_u: SamplerAddressMode,
address_v: SamplerAddressMode, address_w: SamplerAddressMode, mip_lod_bias: f32,
max_anisotropy: f32, min_lod: f32, max_lod: f32)
-> Result<Arc<Sampler>, SamplerCreationError>
{
Sampler::new_impl(device, mag_filter, min_filter, mipmap_mode, address_u, address_v,
address_w, mip_lod_bias, max_anisotropy, min_lod, max_lod, None)
-> Result<Arc<Sampler>, SamplerCreationError> {
Sampler::new_impl(device,
mag_filter,
min_filter,
mipmap_mode,
address_u,
address_v,
address_w,
mip_lod_bias,
max_anisotropy,
min_lod,
max_lod,
None)
}
/// Creates a new `Sampler` with the given behavior.
@ -175,20 +203,29 @@ impl Sampler {
#[inline(always)]
pub fn compare(device: Arc<Device>, mag_filter: Filter, min_filter: Filter,
mipmap_mode: MipmapMode, address_u: SamplerAddressMode,
address_v: SamplerAddressMode, address_w: SamplerAddressMode, mip_lod_bias: f32,
max_anisotropy: f32, min_lod: f32, max_lod: f32, compare: Compare)
-> Result<Arc<Sampler>, SamplerCreationError>
{
Sampler::new_impl(device, mag_filter, min_filter, mipmap_mode, address_u, address_v,
address_w, mip_lod_bias, max_anisotropy, min_lod, max_lod, Some(compare))
address_v: SamplerAddressMode, address_w: SamplerAddressMode,
mip_lod_bias: f32, max_anisotropy: f32, min_lod: f32, max_lod: f32,
compare: Compare)
-> Result<Arc<Sampler>, SamplerCreationError> {
Sampler::new_impl(device,
mag_filter,
min_filter,
mipmap_mode,
address_u,
address_v,
address_w,
mip_lod_bias,
max_anisotropy,
min_lod,
max_lod,
Some(compare))
}
fn new_impl(device: Arc<Device>, mag_filter: Filter, min_filter: Filter,
mipmap_mode: MipmapMode, address_u: SamplerAddressMode,
address_v: SamplerAddressMode, address_w: SamplerAddressMode, mip_lod_bias: f32,
max_anisotropy: f32, min_lod: f32, max_lod: f32, compare: Option<Compare>)
-> Result<Arc<Sampler>, SamplerCreationError>
{
-> Result<Arc<Sampler>, SamplerCreationError> {
assert!(max_anisotropy >= 1.0);
assert!(min_lod <= max_lod);
@ -201,9 +238,9 @@ impl Sampler {
let limit = device.physical_device().limits().max_sampler_anisotropy();
if max_anisotropy > limit {
return Err(SamplerCreationError::AnisotropyLimitExceeded {
requested: max_anisotropy,
maximum: limit,
});
requested: max_anisotropy,
maximum: limit,
});
}
}
@ -212,16 +249,17 @@ impl Sampler {
let limit = device.physical_device().limits().max_sampler_lod_bias();
if mip_lod_bias > limit {
return Err(SamplerCreationError::MipLodBiasLimitExceeded {
requested: mip_lod_bias,
maximum: limit,
});
requested: mip_lod_bias,
maximum: limit,
});
}
}
// Check MirrorClampToEdge extension support
if [address_u, address_v, address_w]
.iter()
.any(|&mode| mode == SamplerAddressMode::MirrorClampToEdge) {
.any(|&mode| mode == SamplerAddressMode::MirrorClampToEdge)
{
if !device.loaded_extensions().khr_sampler_mirror_clamp_to_edge {
return Err(SamplerCreationError::SamplerMirrorClampToEdgeExtensionNotEnabled);
}
@ -230,12 +268,18 @@ impl Sampler {
// Handling border color.
let border_color = address_u.border_color();
let border_color = match (border_color, address_v.border_color()) {
(Some(b1), Some(b2)) => { assert_eq!(b1, b2); Some(b1) },
(Some(b1), Some(b2)) => {
assert_eq!(b1, b2);
Some(b1)
},
(None, b) => b,
(b, None) => b,
};
let border_color = match (border_color, address_w.border_color()) {
(Some(b1), Some(b2)) => { assert_eq!(b1, b2); Some(b1) },
(Some(b1), Some(b2)) => {
assert_eq!(b1, b2);
Some(b1)
},
(None, b) => b,
(b, None) => b,
};
@ -245,7 +289,7 @@ impl Sampler {
let infos = vk::SamplerCreateInfo {
sType: vk::STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
magFilter: mag_filter as u32,
minFilter: min_filter as u32,
mipmapMode: mipmap_mode as u32,
@ -253,9 +297,17 @@ impl Sampler {
addressModeV: address_v.to_vk(),
addressModeW: address_w.to_vk(),
mipLodBias: mip_lod_bias,
anisotropyEnable: if max_anisotropy > 1.0 { vk::TRUE } else { vk::FALSE },
anisotropyEnable: if max_anisotropy > 1.0 {
vk::TRUE
} else {
vk::FALSE
},
maxAnisotropy: max_anisotropy,
compareEnable: if compare.is_some() { vk::TRUE } else { vk:: FALSE },
compareEnable: if compare.is_some() {
vk::TRUE
} else {
vk::FALSE
},
compareOp: compare.map(|c| c as u32).unwrap_or(0),
minLod: min_lod,
maxLod: max_lod,
@ -264,36 +316,39 @@ impl Sampler {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateSampler(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateSampler(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(Arc::new(Sampler {
sampler: sampler,
device: device.clone(),
compare_mode: compare.is_some(),
unnormalized: false,
usable_with_float_formats: match border_color {
Some(BorderColor::FloatTransparentBlack) => true,
Some(BorderColor::FloatOpaqueBlack) => true,
Some(BorderColor::FloatOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_int_formats: compare.is_none() && match border_color {
Some(BorderColor::IntTransparentBlack) => true,
Some(BorderColor::IntOpaqueBlack) => true,
Some(BorderColor::IntOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_swizzling: match border_color {
Some(BorderColor::FloatOpaqueBlack) => false,
Some(BorderColor::IntOpaqueBlack) => false,
_ => true,
},
}))
sampler: sampler,
device: device.clone(),
compare_mode: compare.is_some(),
unnormalized: false,
usable_with_float_formats: match border_color {
Some(BorderColor::FloatTransparentBlack) => true,
Some(BorderColor::FloatOpaqueBlack) => true,
Some(BorderColor::FloatOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_int_formats: compare.is_none() &&
match border_color {
Some(BorderColor::IntTransparentBlack) => true,
Some(BorderColor::IntOpaqueBlack) => true,
Some(BorderColor::IntOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_swizzling: match border_color {
Some(BorderColor::FloatOpaqueBlack) => false,
Some(BorderColor::IntOpaqueBlack) => false,
_ => true,
},
}))
}
/// Creates a sampler with unnormalized coordinates. This means that texture coordinates won't
@ -312,13 +367,15 @@ impl Sampler {
pub fn unnormalized(device: Arc<Device>, filter: Filter,
address_u: UnnormalizedSamplerAddressMode,
address_v: UnnormalizedSamplerAddressMode)
-> Result<Arc<Sampler>, SamplerCreationError>
{
-> Result<Arc<Sampler>, SamplerCreationError> {
let vk = device.pointers();
let border_color = address_u.border_color();
let border_color = match (border_color, address_v.border_color()) {
(Some(b1), Some(b2)) => { assert_eq!(b1, b2); Some(b1) },
(Some(b1), Some(b2)) => {
assert_eq!(b1, b2);
Some(b1)
},
(None, b) => b,
(b, None) => b,
};
@ -327,13 +384,13 @@ impl Sampler {
let infos = vk::SamplerCreateInfo {
sType: vk::STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
flags: 0, // reserved
magFilter: filter as u32,
minFilter: filter as u32,
mipmapMode: vk::SAMPLER_MIPMAP_MODE_NEAREST,
addressModeU: address_u.to_vk(),
addressModeV: address_v.to_vk(),
addressModeW: vk::SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // unused by the impl
addressModeW: vk::SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // unused by the impl
mipLodBias: 0.0,
anisotropyEnable: vk::FALSE,
maxAnisotropy: 1.0,
@ -346,36 +403,38 @@ impl Sampler {
};
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateSampler(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateSampler(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};
Ok(Arc::new(Sampler {
sampler: sampler,
device: device.clone(),
compare_mode: false,
unnormalized: true,
usable_with_float_formats: match border_color {
Some(BorderColor::FloatTransparentBlack) => true,
Some(BorderColor::FloatOpaqueBlack) => true,
Some(BorderColor::FloatOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_int_formats: match border_color {
Some(BorderColor::IntTransparentBlack) => true,
Some(BorderColor::IntOpaqueBlack) => true,
Some(BorderColor::IntOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_swizzling: match border_color {
Some(BorderColor::FloatOpaqueBlack) => false,
Some(BorderColor::IntOpaqueBlack) => false,
_ => true,
},
}))
sampler: sampler,
device: device.clone(),
compare_mode: false,
unnormalized: true,
usable_with_float_formats: match border_color {
Some(BorderColor::FloatTransparentBlack) => true,
Some(BorderColor::FloatOpaqueBlack) => true,
Some(BorderColor::FloatOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_int_formats: match border_color {
Some(BorderColor::IntTransparentBlack) => true,
Some(BorderColor::IntOpaqueBlack) => true,
Some(BorderColor::IntOpaqueWhite) => true,
Some(_) => false,
None => true,
},
usable_with_swizzling: match border_color {
Some(BorderColor::FloatOpaqueBlack) => false,
Some(BorderColor::IntOpaqueBlack) => false,
_ => true,
},
}))
}
/// Returns true if the sampler is a compare-mode sampler.
@ -507,7 +566,7 @@ impl SamplerAddressMode {
fn border_color(self) -> Option<BorderColor> {
match self {
SamplerAddressMode::ClampToBorder(c) => Some(c),
_ => None
_ => None,
}
}
}
@ -598,7 +657,7 @@ pub enum SamplerCreationError {
/// The value that was requested.
requested: f32,
/// The maximum supported value.
maximum: f32
maximum: f32,
},
/// The requested mip lod bias exceeds the device's limits.
@ -606,7 +665,7 @@ pub enum SamplerCreationError {
/// The value that was requested.
requested: f32,
/// The maximum supported value.
maximum: f32
maximum: f32,
},
/// Using `MirrorClampToEdge` requires enabling the `VK_KHR_sampler_mirror_clamp_to_edge`
@ -620,8 +679,8 @@ impl error::Error for SamplerCreationError {
match *self {
SamplerCreationError::OomError(_) => "not enough memory available",
SamplerCreationError::TooManyObjects => "too many simultaneous sampler objects",
SamplerCreationError::SamplerAnisotropyFeatureNotEnabled => "the `sampler_anisotropy` \
feature is not enabled",
SamplerCreationError::SamplerAnisotropyFeatureNotEnabled =>
"the `sampler_anisotropy` feature is not enabled",
SamplerCreationError::AnisotropyLimitExceeded { .. } => "anisotropy limit exceeded",
SamplerCreationError::MipLodBiasLimitExceeded { .. } => "mip lod bias limit exceeded",
SamplerCreationError::SamplerMirrorClampToEdgeExtensionNotEnabled =>
@ -633,7 +692,7 @@ impl error::Error for SamplerCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
SamplerCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -659,7 +718,7 @@ impl From<Error> for SamplerCreationError {
err @ Error::OutOfHostMemory => SamplerCreationError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => SamplerCreationError::OomError(OomError::from(err)),
Error::TooManyObjects => SamplerCreationError::TooManyObjects,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -672,12 +731,18 @@ mod tests {
fn create_regular() {
let (device, queue) = gfx_dev_and_queue!();
let s = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let s = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 1.0,
0.0, 2.0).unwrap();
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
0.0,
2.0)
.unwrap();
assert!(!s.compare_mode());
assert!(!s.is_unnormalized());
}
@ -686,12 +751,19 @@ mod tests {
fn create_compare() {
let (device, queue) = gfx_dev_and_queue!();
let s = sampler::Sampler::compare(device, sampler::Filter::Linear, sampler::Filter::Linear,
let s = sampler::Sampler::compare(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 1.0,
0.0, 2.0, sampler::Compare::Less).unwrap();
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
0.0,
2.0,
sampler::Compare::Less)
.unwrap();
assert!(s.compare_mode());
assert!(!s.is_unnormalized());
@ -701,10 +773,12 @@ mod tests {
fn create_unnormalized() {
let (device, queue) = gfx_dev_and_queue!();
let s = sampler::Sampler::unnormalized(device, sampler::Filter::Linear,
sampler::UnnormalizedSamplerAddressMode::ClampToEdge,
sampler::UnnormalizedSamplerAddressMode::ClampToEdge)
.unwrap();
let s =
sampler::Sampler::unnormalized(device,
sampler::Filter::Linear,
sampler::UnnormalizedSamplerAddressMode::ClampToEdge,
sampler::UnnormalizedSamplerAddressMode::ClampToEdge)
.unwrap();
assert!(!s.compare_mode());
assert!(s.is_unnormalized());
@ -727,11 +801,17 @@ mod tests {
fn min_lod_inferior() {
let (device, queue) = gfx_dev_and_queue!();
let _ = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let _ = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 1.0, 5.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
5.0,
2.0);
}
#[test]
@ -739,11 +819,17 @@ mod tests {
fn max_anisotropy() {
let (device, queue) = gfx_dev_and_queue!();
let _ = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let _ = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 0.5, 0.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
0.5,
0.0,
2.0);
}
#[test]
@ -754,26 +840,38 @@ mod tests {
let b1 = sampler::BorderColor::IntTransparentBlack;
let b2 = sampler::BorderColor::FloatOpaqueWhite;
let _ = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let _ = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::ClampToBorder(b1),
sampler::SamplerAddressMode::ClampToBorder(b2),
sampler::SamplerAddressMode::Repeat, 1.0, 1.0, 5.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
5.0,
2.0);
}
#[test]
fn anisotropy_feature() {
let (device, queue) = gfx_dev_and_queue!();
let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 2.0, 0.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
2.0,
0.0,
2.0);
match r {
Err(sampler::SamplerCreationError::SamplerAnisotropyFeatureNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
}
@ -781,16 +879,21 @@ mod tests {
fn anisotropy_limit() {
let (device, queue) = gfx_dev_and_queue!(sampler_anisotropy);
let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 100000000.0, 0.0,
sampler::SamplerAddressMode::Repeat,
1.0,
100000000.0,
0.0,
2.0);
match r {
Err(sampler::SamplerCreationError::AnisotropyLimitExceeded { .. }) => (),
_ => panic!()
_ => panic!(),
}
}
@ -798,16 +901,21 @@ mod tests {
fn mip_lod_bias_limit() {
let (device, queue) = gfx_dev_and_queue!();
let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 100000000.0, 1.0, 0.0,
sampler::SamplerAddressMode::Repeat,
100000000.0,
1.0,
0.0,
2.0);
match r {
Err(sampler::SamplerCreationError::MipLodBiasLimitExceeded { .. }) => (),
_ => panic!()
_ => panic!(),
}
}
@ -815,16 +923,21 @@ mod tests {
fn sampler_mirror_clamp_to_edge_extension() {
let (device, queue) = gfx_dev_and_queue!();
let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::MirrorClampToEdge,
sampler::SamplerAddressMode::MirrorClampToEdge,
sampler::SamplerAddressMode::MirrorClampToEdge, 1.0, 1.0,
0.0, 2.0);
sampler::SamplerAddressMode::MirrorClampToEdge,
1.0,
1.0,
0.0,
2.0);
match r {
Err(sampler::SamplerCreationError::SamplerMirrorClampToEdgeExtensionNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
}
}

View File

@ -51,7 +51,7 @@ pub struct Capabilities {
pub supported_usage_flags: ImageUsage,
/// List of formats supported for the swapchain.
pub supported_formats: Vec<(Format, ColorSpace)>, // TODO: https://github.com/KhronosGroup/Vulkan-Docs/issues/207
pub supported_formats: Vec<(Format, ColorSpace)>, // TODO: https://github.com/KhronosGroup/Vulkan-Docs/issues/207
/// List of present modes that are supported. `Fifo` is always guaranteed to be supported.
pub present_modes: SupportedPresentModes,
@ -103,7 +103,7 @@ pub fn supported_present_modes_from_list<I>(elem: I) -> SupportedPresentModes
vk::PRESENT_MODE_MAILBOX_KHR => result.mailbox = true,
vk::PRESENT_MODE_FIFO_KHR => result.fifo = true,
vk::PRESENT_MODE_FIFO_RELAXED_KHR => result.relaxed = true,
_ => panic!("Wrong value for vk::PresentModeKHR")
_ => panic!("Wrong value for vk::PresentModeKHR"),
}
}
result
@ -148,10 +148,22 @@ impl Iterator for SupportedPresentModesIter {
#[inline]
fn next(&mut self) -> Option<PresentMode> {
if self.0.immediate { self.0.immediate = false; return Some(PresentMode::Immediate); }
if self.0.mailbox { self.0.mailbox = false; return Some(PresentMode::Mailbox); }
if self.0.fifo { self.0.fifo = false; return Some(PresentMode::Fifo); }
if self.0.relaxed { self.0.relaxed = false; return Some(PresentMode::Relaxed); }
if self.0.immediate {
self.0.immediate = false;
return Some(PresentMode::Immediate);
}
if self.0.mailbox {
self.0.mailbox = false;
return Some(PresentMode::Mailbox);
}
if self.0.fifo {
self.0.fifo = false;
return Some(PresentMode::Fifo);
}
if self.0.relaxed {
self.0.relaxed = false;
return Some(PresentMode::Relaxed);
}
None
}
}
@ -214,10 +226,18 @@ pub struct SupportedCompositeAlpha {
pub fn supported_composite_alpha_from_bits(val: u32) -> SupportedCompositeAlpha {
let mut result = SupportedCompositeAlpha::none();
if (val & vk::COMPOSITE_ALPHA_OPAQUE_BIT_KHR) != 0 { result.opaque = true; }
if (val & vk::COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR) != 0 { result.pre_multiplied = true; }
if (val & vk::COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR) != 0 { result.post_multiplied = true; }
if (val & vk::COMPOSITE_ALPHA_INHERIT_BIT_KHR) != 0 { result.inherit = true; }
if (val & vk::COMPOSITE_ALPHA_OPAQUE_BIT_KHR) != 0 {
result.opaque = true;
}
if (val & vk::COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR) != 0 {
result.pre_multiplied = true;
}
if (val & vk::COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR) != 0 {
result.post_multiplied = true;
}
if (val & vk::COMPOSITE_ALPHA_INHERIT_BIT_KHR) != 0 {
result.inherit = true;
}
result
}
@ -260,10 +280,22 @@ impl Iterator for SupportedCompositeAlphaIter {
#[inline]
fn next(&mut self) -> Option<CompositeAlpha> {
if self.0.opaque { self.0.opaque = false; return Some(CompositeAlpha::Opaque); }
if self.0.pre_multiplied { self.0.pre_multiplied = false; return Some(CompositeAlpha::PreMultiplied); }
if self.0.post_multiplied { self.0.post_multiplied = false; return Some(CompositeAlpha::PostMultiplied); }
if self.0.inherit { self.0.inherit = false; return Some(CompositeAlpha::Inherit); }
if self.0.opaque {
self.0.opaque = false;
return Some(CompositeAlpha::Opaque);
}
if self.0.pre_multiplied {
self.0.pre_multiplied = false;
return Some(CompositeAlpha::PreMultiplied);
}
if self.0.post_multiplied {
self.0.post_multiplied = false;
return Some(CompositeAlpha::PostMultiplied);
}
if self.0.inherit {
self.0.inherit = false;
return Some(CompositeAlpha::Inherit);
}
None
}
}
@ -282,7 +314,8 @@ pub struct SupportedSurfaceTransforms {
pub inherit: bool,
}
pub fn surface_transforms_from_bits(val: vk::SurfaceTransformFlagsKHR) -> SupportedSurfaceTransforms {
pub fn surface_transforms_from_bits(val: vk::SurfaceTransformFlagsKHR)
-> SupportedSurfaceTransforms {
macro_rules! v {
($val:expr, $out:ident, $e:expr, $f:ident) => (
if ($val & $e) != 0 { $out.$f = true; }
@ -290,17 +323,38 @@ pub fn surface_transforms_from_bits(val: vk::SurfaceTransformFlagsKHR) -> Suppor
}
let mut result = SupportedSurfaceTransforms::none();
v!(val, result, vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR, identity);
v!(val, result, vk::SURFACE_TRANSFORM_ROTATE_90_BIT_KHR, rotate90);
v!(val, result, vk::SURFACE_TRANSFORM_ROTATE_180_BIT_KHR, rotate180);
v!(val, result, vk::SURFACE_TRANSFORM_ROTATE_270_BIT_KHR, rotate270);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR, horizontal_mirror);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR,
horizontal_mirror_rotate90);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR,
horizontal_mirror_rotate180);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR,
horizontal_mirror_rotate270);
v!(val,
result,
vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
identity);
v!(val,
result,
vk::SURFACE_TRANSFORM_ROTATE_90_BIT_KHR,
rotate90);
v!(val,
result,
vk::SURFACE_TRANSFORM_ROTATE_180_BIT_KHR,
rotate180);
v!(val,
result,
vk::SURFACE_TRANSFORM_ROTATE_270_BIT_KHR,
rotate270);
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR,
horizontal_mirror);
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR,
horizontal_mirror_rotate90);
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR,
horizontal_mirror_rotate180);
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR,
horizontal_mirror_rotate270);
v!(val, result, vk::SURFACE_TRANSFORM_INHERIT_BIT_KHR, inherit);
result
}
@ -354,15 +408,42 @@ impl Iterator for SupportedSurfaceTransformsIter {
#[inline]
fn next(&mut self) -> Option<SurfaceTransform> {
if self.0.identity { self.0.identity = false; return Some(SurfaceTransform::Identity); }
if self.0.rotate90 { self.0.rotate90 = false; return Some(SurfaceTransform::Rotate90); }
if self.0.rotate180 { self.0.rotate180 = false; return Some(SurfaceTransform::Rotate180); }
if self.0.rotate270 { self.0.rotate270 = false; return Some(SurfaceTransform::Rotate270); }
if self.0.horizontal_mirror { self.0.horizontal_mirror = false; return Some(SurfaceTransform::HorizontalMirror); }
if self.0.horizontal_mirror_rotate90 { self.0.horizontal_mirror_rotate90 = false; return Some(SurfaceTransform::HorizontalMirrorRotate90); }
if self.0.horizontal_mirror_rotate180 { self.0.horizontal_mirror_rotate180 = false; return Some(SurfaceTransform::HorizontalMirrorRotate180); }
if self.0.horizontal_mirror_rotate270 { self.0.horizontal_mirror_rotate270 = false; return Some(SurfaceTransform::HorizontalMirrorRotate270); }
if self.0.inherit { self.0.inherit = false; return Some(SurfaceTransform::Inherit); }
if self.0.identity {
self.0.identity = false;
return Some(SurfaceTransform::Identity);
}
if self.0.rotate90 {
self.0.rotate90 = false;
return Some(SurfaceTransform::Rotate90);
}
if self.0.rotate180 {
self.0.rotate180 = false;
return Some(SurfaceTransform::Rotate180);
}
if self.0.rotate270 {
self.0.rotate270 = false;
return Some(SurfaceTransform::Rotate270);
}
if self.0.horizontal_mirror {
self.0.horizontal_mirror = false;
return Some(SurfaceTransform::HorizontalMirror);
}
if self.0.horizontal_mirror_rotate90 {
self.0.horizontal_mirror_rotate90 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate90);
}
if self.0.horizontal_mirror_rotate180 {
self.0.horizontal_mirror_rotate180 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate180);
}
if self.0.horizontal_mirror_rotate270 {
self.0.horizontal_mirror_rotate270 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate270);
}
if self.0.inherit {
self.0.inherit = false;
return Some(SurfaceTransform::Inherit);
}
None
}
}
@ -497,6 +578,6 @@ pub fn color_space_from_num(val: u32) -> ColorSpace {
vk::COLOR_SPACE_ADOBERGB_LINEAR_EXT => ColorSpace::AdobeRgbLinear,
vk::COLOR_SPACE_ADOBERGB_NONLINEAR_EXT => ColorSpace::AdobeRgbNonLinear,
vk::COLOR_SPACE_PASS_THROUGH_EXT => ColorSpace::PassThrough,
_ => panic!("Wrong value for color space enum")
_ => panic!("Wrong value for color space enum"),
}
}

View File

@ -8,13 +8,13 @@
// according to those terms.
//! Allows you to create surfaces that fill a whole display, outside of the windowing system.
//!
//!
//! **As far as the author knows, no existing device supports these features. Therefore the code
//! here is mostly a draft and needs rework in both the API and the implementation.**
//!
//!
//! The purpose of the objects in this module is to let you create a `Surface` object that
//! represents a location on the screen. This is done in four steps:
//!
//!
//! - Choose a `Display` where the surface will be located. A `Display` represents a display
//! display, usually a monitor. The available displays can be enumerated with
//! `Display::enumerate`.
@ -25,8 +25,8 @@
//! - Create a `Surface` object with `Surface::from_display_mode` and pass the chosen `DisplayMode`
//! and `DisplayPlane`.
#![allow(dead_code)] // TODO: this module isn't finished
#![allow(unused_variables)] // TODO: this module isn't finished
#![allow(dead_code)] // TODO: this module isn't finished
#![allow(unused_variables)] // TODO: this module isn't finished
use std::ffi::CStr;
use std::ptr;
@ -35,12 +35,12 @@ use std::vec::IntoIter;
use instance::Instance;
use instance::PhysicalDevice;
use swapchain::capabilities;
use swapchain::SupportedSurfaceTransforms;
use swapchain::capabilities;
use check_errors;
use OomError;
use VulkanObject;
use check_errors;
use vk;
// TODO: extract this to a `display` module and solve the visibility problems
@ -61,21 +61,22 @@ impl DisplayPlane {
pub fn enumerate_raw(device: &PhysicalDevice) -> Result<IntoIter<DisplayPlane>, OomError> {
let vk = device.instance().pointers();
assert!(device.instance().loaded_extensions().khr_display); // TODO: return error instead
assert!(device.instance().loaded_extensions().khr_display); // TODO: return error instead
let num = unsafe {
let mut num: u32 = 0;
try!(check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
&mut num, ptr::null_mut())));
check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
&mut num,
ptr::null_mut()))?;
num
};
let planes: Vec<vk::DisplayPlanePropertiesKHR> = unsafe {
let mut planes = Vec::with_capacity(num as usize);
let mut num = num;
try!(check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
&mut num,
planes.as_mut_ptr())));
check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
&mut num,
planes.as_mut_ptr()))?;
planes.set_len(num as usize);
planes
};
@ -107,7 +108,7 @@ impl DisplayPlane {
}
}).collect::<Vec<_>>().into_iter())
}
/// Enumerates all the display planes that are available on a given physical device.
///
/// # Panic
@ -140,7 +141,10 @@ impl DisplayPlane {
return false;
}
self.supported_displays.iter().find(|&&d| d == display.internal_object()).is_some()
self.supported_displays
.iter()
.find(|&&d| d == display.internal_object())
.is_some()
}
}
@ -150,41 +154,46 @@ impl DisplayPlane {
pub struct Display {
instance: Arc<Instance>,
physical_device: usize,
properties: Arc<vk::DisplayPropertiesKHR>, // TODO: Arc because struct isn't clone
properties: Arc<vk::DisplayPropertiesKHR>, // TODO: Arc because struct isn't clone
}
impl Display {
/// See the docs of enumerate().
pub fn enumerate_raw(device: &PhysicalDevice) -> Result<IntoIter<Display>, OomError> {
let vk = device.instance().pointers();
assert!(device.instance().loaded_extensions().khr_display); // TODO: return error instead
assert!(device.instance().loaded_extensions().khr_display); // TODO: return error instead
let num = unsafe {
let mut num = 0;
try!(check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
&mut num, ptr::null_mut())));
check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
&mut num,
ptr::null_mut()))?;
num
};
let displays: Vec<vk::DisplayPropertiesKHR> = unsafe {
let mut displays = Vec::with_capacity(num as usize);
let mut num = num;
try!(check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
&mut num,
displays.as_mut_ptr())));
check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
&mut num,
displays.as_mut_ptr()))?;
displays.set_len(num as usize);
displays
};
Ok(displays.into_iter().map(|prop| {
Display {
instance: device.instance().clone(),
physical_device: device.index(),
properties: Arc::new(prop),
}
}).collect::<Vec<_>>().into_iter())
Ok(displays
.into_iter()
.map(|prop| {
Display {
instance: device.instance().clone(),
physical_device: device.index(),
properties: Arc::new(prop),
}
})
.collect::<Vec<_>>()
.into_iter())
}
/// Enumerates all the displays that are available on a given physical device.
///
/// # Panic
@ -254,31 +263,37 @@ impl Display {
let num = unsafe {
let mut num = 0;
try!(check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
self.properties.display,
&mut num, ptr::null_mut())));
check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
self.properties.display,
&mut num,
ptr::null_mut()))?;
num
};
let modes: Vec<vk::DisplayModePropertiesKHR> = unsafe {
let mut modes = Vec::with_capacity(num as usize);
let mut num = num;
try!(check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
self.properties.display, &mut num,
modes.as_mut_ptr())));
check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
self.properties.display,
&mut num,
modes.as_mut_ptr()))?;
modes.set_len(num as usize);
modes
};
Ok(modes.into_iter().map(|mode| {
DisplayMode {
display: self.clone(),
display_mode: mode.displayMode,
parameters: mode.parameters,
}
}).collect::<Vec<_>>().into_iter())
Ok(modes
.into_iter()
.map(|mode| {
DisplayMode {
display: self.clone(),
display_mode: mode.displayMode,
parameters: mode.parameters,
}
})
.collect::<Vec<_>>()
.into_iter())
}
/// Returns a list of all modes available on this display.
///
/// # Panic

Some files were not shown because too many files have changed in this diff Show More