Mirror of https://github.com/vulkano-rs/vulkano.git, synced 2024-11-21 22:34:43 +00:00

Run rustfmt on the code

This commit is contained in:
parent d7c9d08f76
commit ef466eac85

.rustfmt.toml (new file, 18 lines)
@ -0,0 +1,18 @@
fn_args_density = "Compressed"
fn_args_layout = "Visual"
fn_brace_style = "SameLineWhere"
fn_call_style = "Visual"
fn_empty_single_line = false
format_strings = true
generics_indent = "Visual"
impl_empty_single_line = false
match_block_trailing_comma = true
reorder_imported_names = true
reorder_imports = true
reorder_imports_in_group = true
spaces_around_ranges = true
use_try_shorthand = true
where_density = "Tall"
where_style = "Legacy"
wrap_match_arms = false
write_mode = "Overwrite"
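These options drive every rewrite in the hunks below. As a rough before/after sketch of their effect (assuming a rustfmt release old enough to accept these legacy option names), a long call chain like the one in the build script gets wrapped one call per line:

// Before formatting (a single long line, as in the old build script):
// let _ = Command::new("git").arg("submodule").arg("update").arg("--init").status();
//
// After formatting with fn_call_style = "Visual", the chain is broken up:
use std::process::Command;

fn update_submodules() {
    let _ = Command::new("git")
        .arg("submodule")
        .arg("update")
        .arg("--init")
        .status();
}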
@ -18,9 +18,15 @@ fn main() {
} else {
    // Try to initialize submodules. Don't care if it fails, since this code also runs for
    // the crates.io package.
    let _ = Command::new("git").arg("submodule").arg("update").arg("--init").status();
    let _ = Command::new("git")
        .arg("submodule")
        .arg("update")
        .arg("--init")
        .status();
    cmake::build("glslang");
    Path::new(&env::var("OUT_DIR").unwrap()).join("bin").join("glslangValidator")
    Path::new(&env::var("OUT_DIR").unwrap())
        .join("bin")
        .join("glslangValidator")
};

if let Err(_) = fs::hard_link(&path, &out_file) {
@ -43,11 +43,16 @@ fn compile_inner<'a, I>(shaders: I) -> Result<SpirvOutput, String>
    };

    let file_path = temp_dir.path().join(format!("{}{}", num, extension));
    File::create(&file_path).unwrap().write_all(source.as_bytes()).unwrap();
    File::create(&file_path)
        .unwrap()
        .write_all(source.as_bytes())
        .unwrap();
    command.arg(file_path);
}

let output = command.output().expect("Failed to execute glslangValidator");
let output = command
    .output()
    .expect("Failed to execute glslangValidator");

if output.status.success() {
    let spirv_output = File::open(output_file).expect("failed to open SPIR-V output file");
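The hunk above only shows fragments of compile_inner. For orientation, a hypothetical standalone sketch of the same flow (the helper name, file names, and flags here are illustrative assumptions, and glslangValidator is assumed to be on PATH):

use std::fs::File;
use std::io::{Read, Write};
use std::process::Command;

// Write the GLSL source to a temporary file, run glslangValidator on it,
// then read back the SPIR-V it produced.
fn compile_glsl(source: &str) -> Result<Vec<u8>, String> {
    let dir = std::env::temp_dir();
    let src_path = dir.join("shader.vert");        // the extension picks the shader stage
    let out_path = dir.join("shader.spv");
    File::create(&src_path)
        .and_then(|mut f| f.write_all(source.as_bytes()))
        .map_err(|e| e.to_string())?;
    let status = Command::new("glslangValidator")
        .arg("-V")                                 // compile for Vulkan, emit SPIR-V
        .arg(&src_path)
        .arg("-o")
        .arg(&out_path)
        .status()
        .map_err(|e| e.to_string())?;
    if !status.success() {
        return Err("glslangValidator failed".to_owned());
    }
    let mut spirv = Vec::new();
    File::open(&out_path)
        .and_then(|mut f| f.read_to_end(&mut spirv))
        .map_err(|e| e.to_string())?;
    Ok(spirv)
}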
@ -29,10 +29,14 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
// Looping to find all the elements that have the `DescriptorSet` decoration.
for instruction in doc.instructions.iter() {
    let (variable_id, descriptor_set) = match instruction {
        &parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationDescriptorSet, ref params } => {
        &parse::Instruction::Decorate {
            target_id,
            decoration: enums::Decoration::DecorationDescriptorSet,
            ref params,
        } => {
            (target_id, params[0])
        },
        _ => continue
        _ => continue,
    };

    // Find which type is pointed to by this variable.
@ -41,17 +45,29 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
|
||||
let name = ::name_from_id(doc, variable_id);
|
||||
|
||||
// Find the binding point of this descriptor.
|
||||
let binding = doc.instructions.iter().filter_map(|i| {
|
||||
let binding = doc.instructions
|
||||
.iter()
|
||||
.filter_map(|i| {
|
||||
match i {
|
||||
&parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationBinding, ref params } if target_id == variable_id => {
|
||||
&parse::Instruction::Decorate {
|
||||
target_id,
|
||||
decoration: enums::Decoration::DecorationBinding,
|
||||
ref params,
|
||||
} if target_id == variable_id => {
|
||||
Some(params[0])
|
||||
},
|
||||
_ => None, // TODO: other types
|
||||
}
|
||||
}).next().expect(&format!("Uniform `{}` is missing a binding", name));
|
||||
})
|
||||
.next()
|
||||
.expect(&format!("Uniform `{}` is missing a binding", name));
|
||||
|
||||
// Find informations about the kind of binding for this descriptor.
|
||||
let (desc_ty, readonly, array_count) = descriptor_infos(doc, pointed_ty, false).expect(&format!("Couldn't find relevant type for uniform `{}` (type {}, maybe unimplemented)", name, pointed_ty));
|
||||
let (desc_ty, readonly, array_count) = descriptor_infos(doc, pointed_ty, false)
|
||||
.expect(&format!("Couldn't find relevant type for uniform `{}` (type {}, maybe \
|
||||
unimplemented)",
|
||||
name,
|
||||
pointed_ty));
|
||||
|
||||
descriptors.push(Descriptor {
|
||||
name: name,
|
||||
@ -67,10 +83,14 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
|
||||
let mut push_constants_size = 0;
|
||||
for instruction in doc.instructions.iter() {
|
||||
let type_id = match instruction {
|
||||
&parse::Instruction::TypePointer { type_id, storage_class: enums::StorageClass::StorageClassPushConstant, .. } => {
|
||||
&parse::Instruction::TypePointer {
|
||||
type_id,
|
||||
storage_class: enums::StorageClass::StorageClassPushConstant,
|
||||
..
|
||||
} => {
|
||||
type_id
|
||||
},
|
||||
_ => continue
|
||||
_ => continue,
|
||||
};
|
||||
|
||||
let (_, size, _) = ::structs::type_from_id(doc, type_id);
|
||||
@ -79,54 +99,76 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
|
||||
}
|
||||
|
||||
// Writing the body of the `descriptor` method.
|
||||
let descriptor_body = descriptors.iter().map(|d| {
|
||||
format!("({set}, {binding}) => Some(DescriptorDesc {{
|
||||
let descriptor_body = descriptors
|
||||
.iter()
|
||||
.map(|d| {
|
||||
format!(
|
||||
"({set}, {binding}) => Some(DescriptorDesc {{
|
||||
ty: {desc_ty},
|
||||
array_count: {array_count},
|
||||
stages: self.0.clone(),
|
||||
readonly: {readonly},
|
||||
}}),", set = d.set, binding = d.binding, desc_ty = d.desc_ty, array_count = d.array_count,
|
||||
readonly = if d.readonly { "true" } else { "false" })
|
||||
}}),",
|
||||
set = d.set,
|
||||
binding = d.binding,
|
||||
desc_ty = d.desc_ty,
|
||||
array_count = d.array_count,
|
||||
readonly = if d.readonly { "true" } else { "false" }
|
||||
)
|
||||
|
||||
}).collect::<Vec<_>>().concat();
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.concat();
|
||||
|
||||
let num_sets = 1 + descriptors.iter().fold(0, |s, d| cmp::max(s, d.set));
|
||||
|
||||
// Writing the body of the `num_bindings_in_set` method.
|
||||
let num_bindings_in_set_body = {
|
||||
(0 .. num_sets).map(|set| {
|
||||
let num = 1 + descriptors.iter().filter(|d| d.set == set)
|
||||
(0 .. num_sets)
|
||||
.map(|set| {
|
||||
let num = 1 +
|
||||
descriptors
|
||||
.iter()
|
||||
.filter(|d| d.set == set)
|
||||
.fold(0, |s, d| cmp::max(s, d.binding));
|
||||
format!("{set} => Some({num}),", set = set, num = num)
|
||||
}).collect::<Vec<_>>().concat()
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.concat()
|
||||
};
|
||||
|
||||
// Writing the body of the `descriptor_by_name_body` method.
|
||||
let descriptor_by_name_body = descriptors.iter().map(|d| {
|
||||
let descriptor_by_name_body = descriptors
|
||||
.iter()
|
||||
.map(|d| {
|
||||
format!(r#"{name:?} => Some(({set}, {binding})),"#,
|
||||
name = d.name, set = d.set, binding = d.binding)
|
||||
}).collect::<Vec<_>>().concat();
|
||||
name = d.name,
|
||||
set = d.set,
|
||||
binding = d.binding)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.concat();
|
||||
|
||||
// Writing the body of the `num_push_constants_ranges` method.
|
||||
let num_push_constants_ranges_body = {
|
||||
if push_constants_size == 0 {
|
||||
"0"
|
||||
} else {
|
||||
"1"
|
||||
}
|
||||
if push_constants_size == 0 { "0" } else { "1" }
|
||||
};
|
||||
|
||||
// Writing the body of the `push_constants_range` method.
|
||||
let push_constants_range_body = format!(r#"
|
||||
let push_constants_range_body = format!(
|
||||
r#"
|
||||
if num != 0 || {pc_size} == 0 {{ return None; }}
|
||||
Some(PipelineLayoutDescPcRange {{
|
||||
offset: 0, // FIXME: not necessarily true
|
||||
size: {pc_size},
|
||||
stages: ShaderStages::all(), // FIXME: wrong
|
||||
}})
|
||||
"#, pc_size = push_constants_size);
|
||||
"#,
|
||||
pc_size = push_constants_size
|
||||
);
|
||||
|
||||
format!(r#"
|
||||
format!(
|
||||
r#"
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Layout(ShaderStages);
|
||||
|
||||
@ -168,32 +210,45 @@ pub fn write_descriptor_sets(doc: &parse::Spirv) -> String {
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
"#, num_sets = num_sets, num_bindings_in_set_body = num_bindings_in_set_body,
|
||||
descriptor_by_name_body = descriptor_by_name_body, descriptor_body = descriptor_body,
|
||||
"#,
|
||||
num_sets = num_sets,
|
||||
num_bindings_in_set_body = num_bindings_in_set_body,
|
||||
descriptor_by_name_body = descriptor_by_name_body,
|
||||
descriptor_body = descriptor_body,
|
||||
num_push_constants_ranges_body = num_push_constants_ranges_body,
|
||||
push_constants_range_body = push_constants_range_body)
|
||||
push_constants_range_body = push_constants_range_body
|
||||
)
|
||||
}
|
||||
|
||||
/// Assumes that `variable` is a variable with a `TypePointer` and returns the id of the pointed
|
||||
/// type.
|
||||
fn pointer_variable_ty(doc: &parse::Spirv, variable: u32) -> u32 {
|
||||
let var_ty = doc.instructions.iter().filter_map(|i| {
|
||||
match i {
|
||||
&parse::Instruction::Variable { result_type_id, result_id, .. } if result_id == variable => {
|
||||
let var_ty = doc.instructions
|
||||
.iter()
|
||||
.filter_map(|i| match i {
|
||||
&parse::Instruction::Variable {
|
||||
result_type_id,
|
||||
result_id,
|
||||
..
|
||||
} if result_id == variable => {
|
||||
Some(result_type_id)
|
||||
},
|
||||
_ => None
|
||||
}
|
||||
}).next().unwrap();
|
||||
_ => None,
|
||||
})
|
||||
.next()
|
||||
.unwrap();
|
||||
|
||||
doc.instructions.iter().filter_map(|i| {
|
||||
match i {
|
||||
&parse::Instruction::TypePointer { result_id, type_id, .. } if result_id == var_ty => {
|
||||
doc.instructions
|
||||
.iter()
|
||||
.filter_map(|i| match i {
|
||||
&parse::Instruction::TypePointer { result_id, type_id, .. }
|
||||
if result_id == var_ty => {
|
||||
Some(type_id)
|
||||
},
|
||||
_ => None
|
||||
}
|
||||
}).next().unwrap()
|
||||
_ => None,
|
||||
})
|
||||
.next()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Returns a `DescriptorDescTy` constructor, a bool indicating whether the descriptor is
|
||||
@ -201,8 +256,7 @@ fn pointer_variable_ty(doc: &parse::Spirv, variable: u32) -> u32 {
|
||||
///
|
||||
/// See also section 14.5.2 of the Vulkan specs: Descriptor Set Interface
|
||||
fn descriptor_infos(doc: &parse::Spirv, pointed_ty: u32, force_combined_image_sampled: bool)
|
||||
-> Option<(String, bool, u64)>
|
||||
{
|
||||
-> Option<(String, bool, u64)> {
|
||||
doc.instructions.iter().filter_map(|i| {
|
||||
match i {
|
||||
&parse::Instruction::TypeStruct { result_id, .. } if result_id == pointed_ty => {
|
||||
|
@ -10,74 +10,117 @@
use enums;
use parse;

use is_builtin;
use name_from_id;
use location_decoration;
use format_from_id;
use is_builtin;
use location_decoration;
use name_from_id;

pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -> (String, String) {
|
||||
let (execution, ep_name, interface) = match instruction {
|
||||
&parse::Instruction::EntryPoint { ref execution, ref name, ref interface, .. } => {
|
||||
&parse::Instruction::EntryPoint {
|
||||
ref execution,
|
||||
ref name,
|
||||
ref interface,
|
||||
..
|
||||
} => {
|
||||
(execution, name, interface)
|
||||
},
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let capitalized_ep_name: String = ep_name.chars().take(1).flat_map(|c| c.to_uppercase())
|
||||
.chain(ep_name.chars().skip(1)).collect();
|
||||
let capitalized_ep_name: String = ep_name
|
||||
.chars()
|
||||
.take(1)
|
||||
.flat_map(|c| c.to_uppercase())
|
||||
.chain(ep_name.chars().skip(1))
|
||||
.collect();
|
||||
|
||||
let interface_structs = write_interface_structs(doc, &capitalized_ep_name, interface,
|
||||
let interface_structs =
|
||||
write_interface_structs(doc,
|
||||
&capitalized_ep_name,
|
||||
interface,
|
||||
match *execution {
|
||||
enums::ExecutionModel::ExecutionModelTessellationControl => true,
|
||||
enums::ExecutionModel::ExecutionModelTessellationEvaluation => true,
|
||||
enums::ExecutionModel::ExecutionModelTessellationControl =>
|
||||
true,
|
||||
enums::ExecutionModel::ExecutionModelTessellationEvaluation =>
|
||||
true,
|
||||
enums::ExecutionModel::ExecutionModelGeometry => true,
|
||||
_ => false
|
||||
_ => false,
|
||||
},
|
||||
match *execution {
|
||||
enums::ExecutionModel::ExecutionModelTessellationControl => true,
|
||||
enums::ExecutionModel::ExecutionModelTessellationControl =>
|
||||
true,
|
||||
_ => false,
|
||||
});
|
||||
|
||||
let (ty, f_call) = match *execution {
|
||||
enums::ExecutionModel::ExecutionModelVertex => {
|
||||
let t = format!("::vulkano::pipeline::shader::VertexShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
|
||||
let f = format!("vertex_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ vertex: true, .. ShaderStages::none() }}))", capitalized_ep_name);
|
||||
let t = format!("::vulkano::pipeline::shader::VertexShaderEntryPoint<(), {0}Input, \
|
||||
{0}Output, Layout>",
|
||||
capitalized_ep_name);
|
||||
let f = format!("vertex_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() \
|
||||
as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ vertex: \
|
||||
true, .. ShaderStages::none() }}))",
|
||||
capitalized_ep_name);
|
||||
(t, f)
|
||||
},
|
||||
|
||||
enums::ExecutionModel::ExecutionModelTessellationControl => {
|
||||
let t = format!("::vulkano::pipeline::shader::TessControlShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
|
||||
let f = format!("tess_control_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ tessellation_control: true, .. ShaderStages::none() }}))", capitalized_ep_name);
|
||||
let t = format!("::vulkano::pipeline::shader::TessControlShaderEntryPoint<(), \
|
||||
{0}Input, {0}Output, Layout>",
|
||||
capitalized_ep_name);
|
||||
let f = format!("tess_control_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
|
||||
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
|
||||
tessellation_control: true, .. ShaderStages::none() }}))",
|
||||
capitalized_ep_name);
|
||||
(t, f)
|
||||
},
|
||||
|
||||
enums::ExecutionModel::ExecutionModelTessellationEvaluation => {
|
||||
let t = format!("::vulkano::pipeline::shader::TessEvaluationShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
|
||||
let f = format!("tess_evaluation_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ tessellation_evaluation: true, .. ShaderStages::none() }}))", capitalized_ep_name);
|
||||
let t = format!("::vulkano::pipeline::shader::TessEvaluationShaderEntryPoint<(), \
|
||||
{0}Input, {0}Output, Layout>",
|
||||
capitalized_ep_name);
|
||||
let f = format!("tess_evaluation_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
|
||||
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
|
||||
tessellation_evaluation: true, .. ShaderStages::none() }}))",
|
||||
capitalized_ep_name);
|
||||
(t, f)
|
||||
},
|
||||
|
||||
enums::ExecutionModel::ExecutionModelGeometry => {
|
||||
let t = format!("::vulkano::pipeline::shader::GeometryShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
|
||||
let f = format!("geometry_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ geometry: true, .. ShaderStages::none() }}))", capitalized_ep_name);
|
||||
let t = format!("::vulkano::pipeline::shader::GeometryShaderEntryPoint<(), {0}Input, \
|
||||
{0}Output, Layout>",
|
||||
capitalized_ep_name);
|
||||
let f = format!("geometry_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
|
||||
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
|
||||
geometry: true, .. ShaderStages::none() }}))",
|
||||
capitalized_ep_name);
|
||||
(t, f)
|
||||
},
|
||||
|
||||
enums::ExecutionModel::ExecutionModelFragment => {
|
||||
let t = format!("::vulkano::pipeline::shader::FragmentShaderEntryPoint<(), {0}Input, {0}Output, Layout>", capitalized_ep_name);
|
||||
let f = format!("fragment_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ fragment: true, .. ShaderStages::none() }}))", capitalized_ep_name);
|
||||
let t = format!("::vulkano::pipeline::shader::FragmentShaderEntryPoint<(), {0}Input, \
|
||||
{0}Output, Layout>",
|
||||
capitalized_ep_name);
|
||||
let f = format!("fragment_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.\
|
||||
as_ptr() as *const _), {0}Input, {0}Output, Layout(ShaderStages {{ \
|
||||
fragment: true, .. ShaderStages::none() }}))",
|
||||
capitalized_ep_name);
|
||||
(t, f)
|
||||
},
|
||||
|
||||
enums::ExecutionModel::ExecutionModelGLCompute => {
|
||||
(format!("::vulkano::pipeline::shader::ComputeShaderEntryPoint<(), Layout>"),
|
||||
format!("compute_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as *const _), Layout(ShaderStages {{ compute: true, .. ShaderStages::none() }}))"))
|
||||
format!("compute_shader_entry_point(::std::ffi::CStr::from_ptr(NAME.as_ptr() as \
|
||||
*const _), Layout(ShaderStages {{ compute: true, .. ShaderStages::none() \
|
||||
}}))"))
|
||||
},
|
||||
|
||||
enums::ExecutionModel::ExecutionModelKernel => panic!("Kernels are not supported"),
|
||||
};
|
||||
|
||||
let entry_point = format!(r#"
|
||||
let entry_point = format!(
|
||||
r#"
|
||||
/// Returns a logical struct describing the entry point named `{ep_name}`.
|
||||
#[inline]
|
||||
#[allow(unsafe_code)]
|
||||
@ -88,18 +131,24 @@ pub fn write_entry_point(doc: &parse::Spirv, instruction: &parse::Instruction) -
|
||||
self.shader.{f_call}
|
||||
}}
|
||||
}}
|
||||
"#, ep_name = ep_name, ep_name_lenp1 = ep_name.chars().count() + 1, ty = ty,
|
||||
encoded_ep_name = ep_name.chars().map(|c| (c as u32).to_string())
|
||||
.collect::<Vec<String>>().join(", "),
|
||||
f_call = f_call);
|
||||
"#,
|
||||
ep_name = ep_name,
|
||||
ep_name_lenp1 = ep_name.chars().count() + 1,
|
||||
ty = ty,
|
||||
encoded_ep_name = ep_name
|
||||
.chars()
|
||||
.map(|c| (c as u32).to_string())
|
||||
.collect::<Vec<String>>()
|
||||
.join(", "),
|
||||
f_call = f_call
|
||||
);
|
||||
|
||||
(interface_structs, entry_point)
|
||||
}
|
||||
|
||||
fn write_interface_structs(doc: &parse::Spirv, capitalized_ep_name: &str, interface: &[u32],
|
||||
ignore_first_array_in: bool, ignore_first_array_out: bool)
|
||||
-> String
|
||||
{
|
||||
-> String {
|
||||
let mut input_elements = Vec::new();
|
||||
let mut output_elements = Vec::new();
|
||||
|
||||
@ -107,30 +156,40 @@ fn write_interface_structs(doc: &parse::Spirv, capitalized_ep_name: &str, interf
|
||||
for interface in interface.iter() {
|
||||
for i in doc.instructions.iter() {
|
||||
match i {
|
||||
&parse::Instruction::Variable { result_type_id, result_id, ref storage_class, .. }
|
||||
if &result_id == interface =>
|
||||
{
|
||||
&parse::Instruction::Variable {
|
||||
result_type_id,
|
||||
result_id,
|
||||
ref storage_class,
|
||||
..
|
||||
} if &result_id == interface => {
|
||||
if is_builtin(doc, result_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let (to_write, ignore_first_array) = match storage_class {
|
||||
&enums::StorageClass::StorageClassInput => (&mut input_elements, ignore_first_array_in),
|
||||
&enums::StorageClass::StorageClassOutput => (&mut output_elements, ignore_first_array_out),
|
||||
_ => continue
|
||||
&enums::StorageClass::StorageClassInput => (&mut input_elements,
|
||||
ignore_first_array_in),
|
||||
&enums::StorageClass::StorageClassOutput => (&mut output_elements,
|
||||
ignore_first_array_out),
|
||||
_ => continue,
|
||||
};
|
||||
|
||||
let name = name_from_id(doc, result_id);
|
||||
if name == "__unnamed" { continue; } // FIXME: hack
|
||||
if name == "__unnamed" {
|
||||
continue;
|
||||
} // FIXME: hack
|
||||
|
||||
let loc = match location_decoration(doc, result_id) {
|
||||
Some(l) => l,
|
||||
None => panic!("Attribute `{}` (id {}) is missing a location", name, result_id)
|
||||
None => panic!("Attribute `{}` (id {}) is missing a location",
|
||||
name,
|
||||
result_id),
|
||||
};
|
||||
|
||||
to_write.push((loc, name, format_from_id(doc, result_type_id, ignore_first_array)));
|
||||
to_write
|
||||
.push((loc, name, format_from_id(doc, result_type_id, ignore_first_array)));
|
||||
},
|
||||
_ => ()
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -139,7 +198,8 @@ fn write_interface_structs(doc: &parse::Spirv, capitalized_ep_name: &str, interf
|
||||
&write_interface_struct(&format!("{}Output", capitalized_ep_name), &output_elements)
|
||||
}
|
||||
|
||||
fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String, usize))]) -> String {
|
||||
fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String, usize))])
|
||||
-> String {
|
||||
// Checking for overlapping elements.
|
||||
for (offset, &(loc, ref name, (_, loc_len))) in attributes.iter().enumerate() {
|
||||
for &(loc2, ref name2, (_, loc_len2)) in attributes.iter().skip(offset + 1) {
|
||||
@ -148,15 +208,24 @@ fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String
|
||||
{
|
||||
panic!("The locations of attributes `{}` (start={}, size={}) \
|
||||
and `{}` (start={}, size={}) overlap",
|
||||
name, loc, loc_len, name2, loc2, loc_len2);
|
||||
name,
|
||||
loc,
|
||||
loc_len,
|
||||
name2,
|
||||
loc2,
|
||||
loc_len2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let body = attributes.iter().enumerate().map(|(num, &(loc, ref name, (ref ty, num_locs)))| {
|
||||
let body = attributes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(num, &(loc, ref name, (ref ty, num_locs)))| {
|
||||
assert!(num_locs >= 1);
|
||||
|
||||
format!("if self.num == {} {{
|
||||
format!(
|
||||
"if self.num == {} {{
|
||||
self.num += 1;
|
||||
|
||||
return Some(::vulkano::pipeline::shader::ShaderInterfaceDefEntry {{
|
||||
@ -164,39 +233,61 @@ fn write_interface_struct(struct_name: &str, attributes: &[(u32, String, (String
|
||||
format: ::vulkano::format::Format::{},
|
||||
name: Some(::std::borrow::Cow::Borrowed(\"{}\"))
|
||||
}});
|
||||
}}", num, loc, loc as usize + num_locs, ty, name)
|
||||
}).collect::<Vec<_>>().join("");
|
||||
}}",
|
||||
num,
|
||||
loc,
|
||||
loc as usize + num_locs,
|
||||
ty,
|
||||
name
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.join("");
|
||||
|
||||
format!("
|
||||
format!(
|
||||
"
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct {name};
|
||||
|
||||
\
|
||||
#[allow(unsafe_code)]
|
||||
unsafe impl ::vulkano::pipeline::shader::ShaderInterfaceDef for {name} {{
|
||||
unsafe impl ::vulkano::pipeline::shader::ShaderInterfaceDef for \
|
||||
{name} {{
|
||||
type Iter = {name}Iter;
|
||||
fn elements(&self) -> {name}Iter {{
|
||||
\
|
||||
{name}Iter {{ num: 0 }}
|
||||
}}
|
||||
}}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
\
|
||||
pub struct {name}Iter {{ num: u16 }}
|
||||
impl Iterator for {name}Iter {{
|
||||
type Item = ::vulkano::pipeline::shader::ShaderInterfaceDefEntry;
|
||||
type \
|
||||
Item = ::vulkano::pipeline::shader::ShaderInterfaceDefEntry;
|
||||
|
||||
#[inline]
|
||||
\
|
||||
fn next(&mut self) -> Option<Self::Item> {{
|
||||
{body}
|
||||
None
|
||||
\
|
||||
}}
|
||||
|
||||
#[inline]
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {{
|
||||
\
|
||||
let len = ({len} - self.num) as usize;
|
||||
(len, Some(len))
|
||||
}}
|
||||
\
|
||||
}}
|
||||
|
||||
impl ExactSizeIterator for {name}Iter {{}}
|
||||
", name = struct_name, body = body, len = attributes.len())
|
||||
",
|
||||
name = struct_name,
|
||||
body = body,
|
||||
len = attributes.len()
|
||||
)
|
||||
}
|
||||
|
@ -17,8 +17,8 @@ use std::io::Read;
use std::io::Write;
use std::path::Path;

pub use parse::ParseError;
pub use glsl_to_spirv::ShaderType;
pub use parse::ParseError;

mod descriptor_sets;
mod entry_point;
@ -43,7 +43,9 @@ pub fn build_glsl_shaders<'a, I>(shaders: I)

let shader_content = {
    let mut s = String::new();
    File::open(shader).expect("failed to open shader").read_to_string(&mut s)
    File::open(shader)
        .expect("failed to open shader")
        .read_to_string(&mut s)
        .expect("failed to read shader content");
    s
};
@ -65,13 +67,14 @@ pub fn reflect<R>(name: &str, mut spirv: R) -> Result<String, Error>
    where R: Read
{
    let mut data = Vec::new();
    try!(spirv.read_to_end(&mut data));
    spirv.read_to_end(&mut data)?;

    // now parsing the document
    let doc = try!(parse::parse_spirv(&data));
    let doc = parse::parse_spirv(&data)?;

    let mut output = String::new();
    output.push_str(r#"
    output.push_str(
        r#"
#[allow(unused_imports)]
use std::sync::Arc;
#[allow(unused_imports)]
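The try!-to-? rewrites in this hunk come from use_try_shorthand = true in the config above; the two spellings are equivalent, as in this minimal illustration:

use std::io::Read;

fn read_all(mut r: impl Read) -> std::io::Result<Vec<u8>> {
    let mut data = Vec::new();
    // let n = try!(r.read_to_end(&mut data));   // old macro form
    let _n = r.read_to_end(&mut data)?;          // shorthand form written out by rustfmt
    Ok(data)
}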
@ -109,16 +112,19 @@ pub fn reflect<R>(name: &str, mut spirv: R) -> Result<String, Error>
|
||||
use vulkano::descriptor::pipeline_layout::PipelineLayoutDescNames;
|
||||
#[allow(unused_imports)]
|
||||
use vulkano::descriptor::pipeline_layout::PipelineLayoutDescPcRange;
|
||||
"#);
|
||||
"#,
|
||||
);
|
||||
|
||||
{
|
||||
// contains the data that was passed as input to this function
|
||||
let spirv_data = data.iter().map(|&byte| byte.to_string())
|
||||
let spirv_data = data.iter()
|
||||
.map(|&byte| byte.to_string())
|
||||
.collect::<Vec<String>>()
|
||||
.join(", ");
|
||||
|
||||
// writing the header
|
||||
output.push_str(&format!(r#"
|
||||
output.push_str(&format!(
|
||||
r#"
|
||||
pub struct {name} {{
|
||||
shader: ::std::sync::Arc<::vulkano::pipeline::shader::ShaderModule>,
|
||||
}}
|
||||
@ -131,23 +137,29 @@ impl {name} {{
|
||||
-> Result<{name}, ::vulkano::OomError>
|
||||
{{
|
||||
|
||||
"#, name = name));
|
||||
"#,
|
||||
name = name
|
||||
));
|
||||
|
||||
// checking whether each required capability is enabled in the vulkan device
|
||||
for i in doc.instructions.iter() {
|
||||
if let &parse::Instruction::Capability(ref cap) = i {
|
||||
if let Some(cap) = capability_name(cap) {
|
||||
output.push_str(&format!(r#"
|
||||
output.push_str(&format!(
|
||||
r#"
|
||||
if !device.enabled_features().{cap} {{
|
||||
panic!("capability {{:?}} not enabled", "{cap}") // FIXME: error
|
||||
//return Err(CapabilityNotEnabled);
|
||||
}}"#, cap = cap));
|
||||
}}"#,
|
||||
cap = cap
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// follow-up of the header
|
||||
output.push_str(&format!(r#"
|
||||
output.push_str(&format!(
|
||||
r#"
|
||||
unsafe {{
|
||||
let data = [{spirv_data}];
|
||||
|
||||
@ -163,7 +175,10 @@ impl {name} {{
|
||||
pub fn module(&self) -> &::std::sync::Arc<::vulkano::pipeline::shader::ShaderModule> {{
|
||||
&self.shader
|
||||
}}
|
||||
"#, name = name, spirv_data = spirv_data));
|
||||
"#,
|
||||
name = name,
|
||||
spirv_data = spirv_data
|
||||
));
|
||||
|
||||
// writing one method for each entry point of this module
|
||||
let mut outside_impl = String::new();
|
||||
@ -176,9 +191,11 @@ impl {name} {{
|
||||
}
|
||||
|
||||
// footer
|
||||
output.push_str(&format!(r#"
|
||||
output.push_str(&format!(
|
||||
r#"
|
||||
}}
|
||||
"#));
|
||||
"#
|
||||
));
|
||||
|
||||
output.push_str(&outside_impl);
|
||||
|
||||
@ -222,7 +239,11 @@ impl From<ParseError> for Error {
|
||||
fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -> (String, usize) {
|
||||
for instruction in doc.instructions.iter() {
|
||||
match instruction {
|
||||
&parse::Instruction::TypeInt { result_id, width, signedness } if result_id == searched => {
|
||||
&parse::Instruction::TypeInt {
|
||||
result_id,
|
||||
width,
|
||||
signedness,
|
||||
} if result_id == searched => {
|
||||
assert!(!ignore_first_array);
|
||||
return (match (width, signedness) {
|
||||
(8, true) => "R8Sint",
|
||||
@ -233,18 +254,24 @@ fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -
|
||||
(32, false) => "R32Uint",
|
||||
(64, true) => "R64Sint",
|
||||
(64, false) => "R64Uint",
|
||||
_ => panic!()
|
||||
}.to_owned(), 1);
|
||||
_ => panic!(),
|
||||
}.to_owned(),
|
||||
1);
|
||||
},
|
||||
&parse::Instruction::TypeFloat { result_id, width } if result_id == searched => {
|
||||
assert!(!ignore_first_array);
|
||||
return (match width {
|
||||
32 => "R32Sfloat",
|
||||
64 => "R64Sfloat",
|
||||
_ => panic!()
|
||||
}.to_owned(), 1);
|
||||
_ => panic!(),
|
||||
}.to_owned(),
|
||||
1);
|
||||
},
|
||||
&parse::Instruction::TypeVector { result_id, component_id, count } if result_id == searched => {
|
||||
&parse::Instruction::TypeVector {
|
||||
result_id,
|
||||
component_id,
|
||||
count,
|
||||
} if result_id == searched => {
|
||||
assert!(!ignore_first_array);
|
||||
let (format, sz) = format_from_id(doc, component_id, false);
|
||||
assert!(format.starts_with("R32"));
|
||||
@ -262,27 +289,45 @@ fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -
|
||||
};
|
||||
return (format, sz);
|
||||
},
|
||||
&parse::Instruction::TypeMatrix { result_id, column_type_id, column_count } if result_id == searched => {
|
||||
&parse::Instruction::TypeMatrix {
|
||||
result_id,
|
||||
column_type_id,
|
||||
column_count,
|
||||
} if result_id == searched => {
|
||||
assert!(!ignore_first_array);
|
||||
let (format, sz) = format_from_id(doc, column_type_id, false);
|
||||
return (format, sz * column_count as usize);
|
||||
},
|
||||
&parse::Instruction::TypeArray { result_id, type_id, length_id } if result_id == searched => {
|
||||
&parse::Instruction::TypeArray {
|
||||
result_id,
|
||||
type_id,
|
||||
length_id,
|
||||
} if result_id == searched => {
|
||||
if ignore_first_array {
|
||||
return format_from_id(doc, type_id, false);
|
||||
}
|
||||
|
||||
let (format, sz) = format_from_id(doc, type_id, false);
|
||||
let len = doc.instructions.iter().filter_map(|e| {
|
||||
match e { &parse::Instruction::Constant { result_id, ref data, .. } if result_id == length_id => Some(data.clone()), _ => None }
|
||||
}).next().expect("failed to find array length");
|
||||
let len = doc.instructions
|
||||
.iter()
|
||||
.filter_map(|e| match e {
|
||||
&parse::Instruction::Constant {
|
||||
result_id,
|
||||
ref data,
|
||||
..
|
||||
} if result_id == length_id => Some(data.clone()),
|
||||
_ => None,
|
||||
})
|
||||
.next()
|
||||
.expect("failed to find array length");
|
||||
let len = len.iter().rev().fold(0u64, |a, &b| (a << 32) | b as u64);
|
||||
return (format, sz * len as usize);
|
||||
},
|
||||
&parse::Instruction::TypePointer { result_id, type_id, .. } if result_id == searched => {
|
||||
&parse::Instruction::TypePointer { result_id, type_id, .. }
|
||||
if result_id == searched => {
|
||||
return format_from_id(doc, type_id, ignore_first_array);
|
||||
},
|
||||
_ => ()
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
@ -290,8 +335,13 @@ fn format_from_id(doc: &parse::Spirv, searched: u32, ignore_first_array: bool) -
|
||||
}
|
||||
|
||||
fn name_from_id(doc: &parse::Spirv, searched: u32) -> String {
|
||||
doc.instructions.iter().filter_map(|i| {
|
||||
if let &parse::Instruction::Name { target_id, ref name } = i {
|
||||
doc.instructions
|
||||
.iter()
|
||||
.filter_map(|i| if let &parse::Instruction::Name {
|
||||
target_id,
|
||||
ref name,
|
||||
} = i
|
||||
{
|
||||
if target_id == searched {
|
||||
Some(name.clone())
|
||||
} else {
|
||||
@ -299,14 +349,21 @@ fn name_from_id(doc: &parse::Spirv, searched: u32) -> String {
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}).next().and_then(|n| if !n.is_empty() { Some(n) } else { None })
|
||||
})
|
||||
.next()
|
||||
.and_then(|n| if !n.is_empty() { Some(n) } else { None })
|
||||
.unwrap_or("__unnamed".to_owned())
|
||||
}
|
||||
|
||||
fn member_name_from_id(doc: &parse::Spirv, searched: u32, searched_member: u32) -> String {
|
||||
doc.instructions.iter().filter_map(|i| {
|
||||
if let &parse::Instruction::MemberName { target_id, member, ref name } = i {
|
||||
doc.instructions
|
||||
.iter()
|
||||
.filter_map(|i| if let &parse::Instruction::MemberName {
|
||||
target_id,
|
||||
member,
|
||||
ref name,
|
||||
} = i
|
||||
{
|
||||
if target_id == searched && member == searched_member {
|
||||
Some(name.clone())
|
||||
} else {
|
||||
@ -314,14 +371,21 @@ fn member_name_from_id(doc: &parse::Spirv, searched: u32, searched_member: u32)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}).next().and_then(|n| if !n.is_empty() { Some(n) } else { None })
|
||||
})
|
||||
.next()
|
||||
.and_then(|n| if !n.is_empty() { Some(n) } else { None })
|
||||
.unwrap_or("__unnamed".to_owned())
|
||||
}
|
||||
|
||||
fn location_decoration(doc: &parse::Spirv, searched: u32) -> Option<u32> {
|
||||
doc.instructions.iter().filter_map(|i| {
|
||||
if let &parse::Instruction::Decorate { target_id, decoration: enums::Decoration::DecorationLocation, ref params } = i {
|
||||
doc.instructions
|
||||
.iter()
|
||||
.filter_map(|i| if let &parse::Instruction::Decorate {
|
||||
target_id,
|
||||
decoration: enums::Decoration::DecorationLocation,
|
||||
ref params,
|
||||
} = i
|
||||
{
|
||||
if target_id == searched {
|
||||
Some(params[0])
|
||||
} else {
|
||||
@ -329,33 +393,39 @@ fn location_decoration(doc: &parse::Spirv, searched: u32) -> Option<u32> {
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}).next()
|
||||
})
|
||||
.next()
|
||||
}
|
||||
|
||||
/// Returns true if a `BuiltIn` decorator is applied on an id.
|
||||
fn is_builtin(doc: &parse::Spirv, id: u32) -> bool {
|
||||
for instruction in &doc.instructions {
|
||||
match *instruction {
|
||||
parse::Instruction::Decorate { target_id,
|
||||
parse::Instruction::Decorate {
|
||||
target_id,
|
||||
decoration: enums::Decoration::DecorationBuiltIn,
|
||||
.. } if target_id == id =>
|
||||
{
|
||||
..
|
||||
} if target_id == id => {
|
||||
return true;
|
||||
},
|
||||
parse::Instruction::MemberDecorate { target_id,
|
||||
parse::Instruction::MemberDecorate {
|
||||
target_id,
|
||||
decoration: enums::Decoration::DecorationBuiltIn,
|
||||
.. } if target_id == id =>
|
||||
{
|
||||
..
|
||||
} if target_id == id => {
|
||||
return true;
|
||||
},
|
||||
_ => ()
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
for instruction in &doc.instructions {
|
||||
match *instruction {
|
||||
parse::Instruction::Variable { result_type_id, result_id, .. } if result_id == id => {
|
||||
parse::Instruction::Variable {
|
||||
result_type_id,
|
||||
result_id,
|
||||
..
|
||||
} if result_id == id => {
|
||||
return is_builtin(doc, result_type_id);
|
||||
},
|
||||
parse::Instruction::TypeArray { result_id, type_id, .. } if result_id == id => {
|
||||
@ -364,15 +434,20 @@ fn is_builtin(doc: &parse::Spirv, id: u32) -> bool {
|
||||
parse::Instruction::TypeRuntimeArray { result_id, type_id } if result_id == id => {
|
||||
return is_builtin(doc, type_id);
|
||||
},
|
||||
parse::Instruction::TypeStruct { result_id, ref member_types } if result_id == id => {
|
||||
parse::Instruction::TypeStruct {
|
||||
result_id,
|
||||
ref member_types,
|
||||
} if result_id == id => {
|
||||
for &mem in member_types {
|
||||
if is_builtin(doc, mem) { return true; }
|
||||
if is_builtin(doc, mem) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
},
|
||||
parse::Instruction::TypePointer { result_id, type_id, .. } if result_id == id => {
|
||||
return is_builtin(doc, type_id);
|
||||
},
|
||||
_ => ()
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
@ -407,14 +482,21 @@ fn capability_name(cap: &enums::Capability) -> Option<&'static str> {
|
||||
enums::Capability::CapabilityLiteralSampler => panic!(), // not supported
|
||||
enums::Capability::CapabilityAtomicStorage => panic!(), // not supported
|
||||
enums::Capability::CapabilityInt16 => Some("shader_int16"),
|
||||
enums::Capability::CapabilityTessellationPointSize => Some("shader_tessellation_and_geometry_point_size"),
|
||||
enums::Capability::CapabilityGeometryPointSize => Some("shader_tessellation_and_geometry_point_size"),
|
||||
enums::Capability::CapabilityTessellationPointSize =>
|
||||
Some("shader_tessellation_and_geometry_point_size"),
|
||||
enums::Capability::CapabilityGeometryPointSize =>
|
||||
Some("shader_tessellation_and_geometry_point_size"),
|
||||
enums::Capability::CapabilityImageGatherExtended => Some("shader_image_gather_extended"),
|
||||
enums::Capability::CapabilityStorageImageMultisample => Some("shader_storage_image_multisample"),
|
||||
enums::Capability::CapabilityUniformBufferArrayDynamicIndexing => Some("shader_uniform_buffer_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilitySampledImageArrayDynamicIndexing => Some("shader_sampled_image_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilityStorageBufferArrayDynamicIndexing => Some("shader_storage_buffer_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilityStorageImageArrayDynamicIndexing => Some("shader_storage_image_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilityStorageImageMultisample =>
|
||||
Some("shader_storage_image_multisample"),
|
||||
enums::Capability::CapabilityUniformBufferArrayDynamicIndexing =>
|
||||
Some("shader_uniform_buffer_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilitySampledImageArrayDynamicIndexing =>
|
||||
Some("shader_sampled_image_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilityStorageBufferArrayDynamicIndexing =>
|
||||
Some("shader_storage_buffer_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilityStorageImageArrayDynamicIndexing =>
|
||||
Some("shader_storage_image_array_dynamic_indexing"),
|
||||
enums::Capability::CapabilityClipDistance => Some("shader_clip_distance"),
|
||||
enums::Capability::CapabilityCullDistance => Some("shader_cull_distance"),
|
||||
enums::Capability::CapabilityImageCubeArray => Some("image_cube_array"),
|
||||
@ -432,14 +514,17 @@ fn capability_name(cap: &enums::Capability) -> Option<&'static str> {
|
||||
enums::Capability::CapabilitySampledBuffer => None, // always supported
|
||||
enums::Capability::CapabilityImageBuffer => None, // always supported
|
||||
enums::Capability::CapabilityImageMSArray => Some("shader_storage_image_multisample"),
|
||||
enums::Capability::CapabilityStorageImageExtendedFormats => Some("shader_storage_image_extended_formats"),
|
||||
enums::Capability::CapabilityStorageImageExtendedFormats =>
|
||||
Some("shader_storage_image_extended_formats"),
|
||||
enums::Capability::CapabilityImageQuery => None, // always supported
|
||||
enums::Capability::CapabilityDerivativeControl => None, // always supported
|
||||
enums::Capability::CapabilityInterpolationFunction => Some("sample_rate_shading"),
|
||||
enums::Capability::CapabilityTransformFeedback => panic!(), // not supported
|
||||
enums::Capability::CapabilityGeometryStreams => panic!(), // not supported
|
||||
enums::Capability::CapabilityStorageImageReadWithoutFormat => Some("shader_storage_image_read_without_format"),
|
||||
enums::Capability::CapabilityStorageImageWriteWithoutFormat => Some("shader_storage_image_write_without_format"),
|
||||
enums::Capability::CapabilityStorageImageReadWithoutFormat =>
|
||||
Some("shader_storage_image_read_without_format"),
|
||||
enums::Capability::CapabilityStorageImageWriteWithoutFormat =>
|
||||
Some("shader_storage_image_write_without_format"),
|
||||
enums::Capability::CapabilityMultiViewport => Some("multi_viewport"),
|
||||
}
|
||||
}
|
||||
|
@ -19,15 +19,21 @@ pub fn parse_spirv(data: &[u8]) -> Result<Spirv, ParseError> {
// on the magic number at the start of the file
let data = if data[0] == 0x07 && data[1] == 0x23 && data[2] == 0x02 && data[3] == 0x03 {
    // big endian
    data.chunks(4).map(|c| {
        ((c[0] as u32) << 24) | ((c[1] as u32) << 16) | ((c[2] as u32) << 8) | c[3] as u32
    }).collect::<Vec<_>>()
    data.chunks(4)
        .map(|c| {
            ((c[0] as u32) << 24) | ((c[1] as u32) << 16) | ((c[2] as u32) << 8) |
                c[3] as u32
        })
        .collect::<Vec<_>>()

} else if data[3] == 0x07 && data[2] == 0x23 && data[1] == 0x02 && data[0] == 0x03 {
    // little endian
    data.chunks(4).map(|c| {
        ((c[3] as u32) << 24) | ((c[2] as u32) << 16) | ((c[1] as u32) << 8) | c[0] as u32
    }).collect::<Vec<_>>()
    data.chunks(4)
        .map(|c| {
            ((c[3] as u32) << 24) | ((c[2] as u32) << 16) | ((c[1] as u32) << 8) |
                c[0] as u32
        })
        .collect::<Vec<_>>()

} else {
    return Err(ParseError::MissingHeader);
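For orientation, the same magic-number probe can be written with the standard from_be_bytes / from_le_bytes helpers. This is a hedged sketch of the logic above, not the crate's API; the SPIR-V magic word is 0x07230203:

fn spirv_words(data: &[u8]) -> Option<Vec<u32>> {
    const MAGIC: [u8; 4] = [0x07, 0x23, 0x02, 0x03];
    if data.len() < 4 || data.len() % 4 != 0 {
        return None; // corresponds to ParseError::MissingHeader / truncated input
    }
    let pack = |f: fn([u8; 4]) -> u32| {
        data.chunks(4).map(|c| f([c[0], c[1], c[2], c[3]])).collect::<Vec<u32>>()
    };
    if data[.. 4] == MAGIC {
        Some(pack(u32::from_be_bytes)) // words stored big endian
    } else if data[.. 4] == [MAGIC[3], MAGIC[2], MAGIC[1], MAGIC[0]] {
        Some(pack(u32::from_le_bytes)) // words stored little endian
    } else {
        None
    }
}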
@ -54,7 +60,7 @@ fn parse_u32s(i: &[u32]) -> Result<Spirv, ParseError> {
|
||||
let mut ret = Vec::new();
|
||||
let mut i = &i[5 ..];
|
||||
while i.len() >= 1 {
|
||||
let (instruction, rest) = try!(parse_instruction(i));
|
||||
let (instruction, rest) = parse_instruction(i)?;
|
||||
ret.push(instruction);
|
||||
i = rest;
|
||||
}
|
||||
@ -89,30 +95,90 @@ pub enum Instruction {
|
||||
Unknown(u16, Vec<u32>),
|
||||
Nop,
|
||||
Name { target_id: u32, name: String },
|
||||
MemberName { target_id: u32, member: u32, name: String },
|
||||
MemberName {
|
||||
target_id: u32,
|
||||
member: u32,
|
||||
name: String,
|
||||
},
|
||||
ExtInstImport { result_id: u32, name: String },
|
||||
MemoryModel(AddressingModel, MemoryModel),
|
||||
EntryPoint { execution: ExecutionModel, id: u32, name: String, interface: Vec<u32> },
|
||||
EntryPoint {
|
||||
execution: ExecutionModel,
|
||||
id: u32,
|
||||
name: String,
|
||||
interface: Vec<u32>,
|
||||
},
|
||||
Capability(Capability),
|
||||
TypeVoid { result_id: u32 },
|
||||
TypeBool { result_id: u32 },
|
||||
TypeInt { result_id: u32, width: u32, signedness: bool },
|
||||
TypeInt {
|
||||
result_id: u32,
|
||||
width: u32,
|
||||
signedness: bool,
|
||||
},
|
||||
TypeFloat { result_id: u32, width: u32 },
|
||||
TypeVector { result_id: u32, component_id: u32, count: u32 },
|
||||
TypeMatrix { result_id: u32, column_type_id: u32, column_count: u32 },
|
||||
TypeImage { result_id: u32, sampled_type_id: u32, dim: Dim, depth: Option<bool>, arrayed: bool, ms: bool, sampled: Option<bool>, format: ImageFormat, access: Option<AccessQualifier> },
|
||||
TypeVector {
|
||||
result_id: u32,
|
||||
component_id: u32,
|
||||
count: u32,
|
||||
},
|
||||
TypeMatrix {
|
||||
result_id: u32,
|
||||
column_type_id: u32,
|
||||
column_count: u32,
|
||||
},
|
||||
TypeImage {
|
||||
result_id: u32,
|
||||
sampled_type_id: u32,
|
||||
dim: Dim,
|
||||
depth: Option<bool>,
|
||||
arrayed: bool,
|
||||
ms: bool,
|
||||
sampled: Option<bool>,
|
||||
format: ImageFormat,
|
||||
access: Option<AccessQualifier>,
|
||||
},
|
||||
TypeSampler { result_id: u32 },
|
||||
TypeSampledImage { result_id: u32, image_type_id: u32 },
|
||||
TypeArray { result_id: u32, type_id: u32, length_id: u32 },
|
||||
TypeArray {
|
||||
result_id: u32,
|
||||
type_id: u32,
|
||||
length_id: u32,
|
||||
},
|
||||
TypeRuntimeArray { result_id: u32, type_id: u32 },
|
||||
TypeStruct { result_id: u32, member_types: Vec<u32> },
|
||||
TypeStruct {
|
||||
result_id: u32,
|
||||
member_types: Vec<u32>,
|
||||
},
|
||||
TypeOpaque { result_id: u32, name: String },
|
||||
TypePointer { result_id: u32, storage_class: StorageClass, type_id: u32 },
|
||||
Constant { result_type_id: u32, result_id: u32, data: Vec<u32> },
|
||||
TypePointer {
|
||||
result_id: u32,
|
||||
storage_class: StorageClass,
|
||||
type_id: u32,
|
||||
},
|
||||
Constant {
|
||||
result_type_id: u32,
|
||||
result_id: u32,
|
||||
data: Vec<u32>,
|
||||
},
|
||||
FunctionEnd,
|
||||
Variable { result_type_id: u32, result_id: u32, storage_class: StorageClass, initializer: Option<u32> },
|
||||
Decorate { target_id: u32, decoration: Decoration, params: Vec<u32> },
|
||||
MemberDecorate { target_id: u32, member: u32, decoration: Decoration, params: Vec<u32> },
|
||||
Variable {
|
||||
result_type_id: u32,
|
||||
result_id: u32,
|
||||
storage_class: StorageClass,
|
||||
initializer: Option<u32>,
|
||||
},
|
||||
Decorate {
|
||||
target_id: u32,
|
||||
decoration: Decoration,
|
||||
params: Vec<u32>,
|
||||
},
|
||||
MemberDecorate {
|
||||
target_id: u32,
|
||||
member: u32,
|
||||
decoration: Decoration,
|
||||
params: Vec<u32>,
|
||||
},
|
||||
Label { result_id: u32 },
|
||||
Branch { result_id: u32 },
|
||||
Kill,
|
||||
@ -130,63 +196,134 @@ fn parse_instruction(i: &[u32]) -> Result<(Instruction, &[u32]), ParseError> {
|
||||
return Err(ParseError::IncompleteInstruction);
|
||||
}
|
||||
|
||||
let opcode = try!(decode_instruction(opcode, &i[1 .. word_count]));
|
||||
let opcode = decode_instruction(opcode, &i[1 .. word_count])?;
|
||||
Ok((opcode, &i[word_count ..]))
|
||||
}
|
||||
|
||||
fn decode_instruction(opcode: u16, operands: &[u32]) -> Result<Instruction, ParseError> {
|
||||
Ok(match opcode {
|
||||
0 => Instruction::Nop,
|
||||
5 => Instruction::Name { target_id: operands[0], name: parse_string(&operands[1..]).0 },
|
||||
6 => Instruction::MemberName { target_id: operands[0], member: operands[1], name: parse_string(&operands[2..]).0 },
|
||||
5 => Instruction::Name {
|
||||
target_id: operands[0],
|
||||
name: parse_string(&operands[1 ..]).0,
|
||||
},
|
||||
6 => Instruction::MemberName {
|
||||
target_id: operands[0],
|
||||
member: operands[1],
|
||||
name: parse_string(&operands[2 ..]).0,
|
||||
},
|
||||
11 => Instruction::ExtInstImport {
|
||||
result_id: operands[0],
|
||||
name: parse_string(&operands[1..]).0
|
||||
name: parse_string(&operands[1 ..]).0,
|
||||
},
|
||||
14 => Instruction::MemoryModel(try!(AddressingModel::from_num(operands[0])), try!(MemoryModel::from_num(operands[1]))),
|
||||
14 => Instruction::MemoryModel(AddressingModel::from_num(operands[0])?,
|
||||
MemoryModel::from_num(operands[1])?),
|
||||
15 => {
|
||||
let (n, r) = parse_string(&operands[2 ..]);
|
||||
Instruction::EntryPoint {
|
||||
execution: try!(ExecutionModel::from_num(operands[0])),
|
||||
execution: ExecutionModel::from_num(operands[0])?,
|
||||
id: operands[1],
|
||||
name: n,
|
||||
interface: r.to_owned(),
|
||||
}
|
||||
},
|
||||
17 => Instruction::Capability(try!(Capability::from_num(operands[0]))),
|
||||
17 => Instruction::Capability(Capability::from_num(operands[0])?),
|
||||
19 => Instruction::TypeVoid { result_id: operands[0] },
|
||||
20 => Instruction::TypeBool { result_id: operands[0] },
|
||||
21 => Instruction::TypeInt { result_id: operands[0], width: operands[1], signedness: operands[2] != 0 },
|
||||
22 => Instruction::TypeFloat { result_id: operands[0], width: operands[1] },
|
||||
23 => Instruction::TypeVector { result_id: operands[0], component_id: operands[1], count: operands[2] },
|
||||
24 => Instruction::TypeMatrix { result_id: operands[0], column_type_id: operands[1], column_count: operands[2] },
|
||||
21 => Instruction::TypeInt {
|
||||
result_id: operands[0],
|
||||
width: operands[1],
|
||||
signedness: operands[2] != 0,
|
||||
},
|
||||
22 => Instruction::TypeFloat {
|
||||
result_id: operands[0],
|
||||
width: operands[1],
|
||||
},
|
||||
23 => Instruction::TypeVector {
|
||||
result_id: operands[0],
|
||||
component_id: operands[1],
|
||||
count: operands[2],
|
||||
},
|
||||
24 => Instruction::TypeMatrix {
|
||||
result_id: operands[0],
|
||||
column_type_id: operands[1],
|
||||
column_count: operands[2],
|
||||
},
|
||||
25 => Instruction::TypeImage {
|
||||
result_id: operands[0],
|
||||
sampled_type_id: operands[1],
|
||||
dim: try!(Dim::from_num(operands[2])),
|
||||
depth: match operands[3] { 0 => Some(false), 1 => Some(true), 2 => None, _ => unreachable!() },
|
||||
dim: Dim::from_num(operands[2])?,
|
||||
depth: match operands[3] {
|
||||
0 => Some(false),
|
||||
1 => Some(true),
|
||||
2 => None,
|
||||
_ => unreachable!(),
|
||||
},
|
||||
arrayed: operands[4] != 0,
|
||||
ms: operands[5] != 0,
|
||||
sampled: match operands[6] { 0 => None, 1 => Some(true), 2 => Some(false), _ => unreachable!() },
|
||||
format: try!(ImageFormat::from_num(operands[7])),
|
||||
access: if operands.len() >= 9 { Some(try!(AccessQualifier::from_num(operands[8]))) } else { None },
|
||||
sampled: match operands[6] {
|
||||
0 => None,
|
||||
1 => Some(true),
|
||||
2 => Some(false),
|
||||
_ => unreachable!(),
|
||||
},
|
||||
format: ImageFormat::from_num(operands[7])?,
|
||||
access: if operands.len() >= 9 {
|
||||
Some(AccessQualifier::from_num(operands[8])?)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
26 => Instruction::TypeSampler { result_id: operands[0] },
|
||||
27 => Instruction::TypeSampledImage { result_id: operands[0], image_type_id: operands[1] },
|
||||
28 => Instruction::TypeArray { result_id: operands[0], type_id: operands[1], length_id: operands[2] },
|
||||
29 => Instruction::TypeRuntimeArray { result_id: operands[0], type_id: operands[1] },
|
||||
30 => Instruction::TypeStruct { result_id: operands[0], member_types: operands[1..].to_owned() },
|
||||
31 => Instruction::TypeOpaque { result_id: operands[0], name: parse_string(&operands[1..]).0 },
|
||||
32 => Instruction::TypePointer { result_id: operands[0], storage_class: try!(StorageClass::from_num(operands[1])), type_id: operands[2] },
|
||||
43 => Instruction::Constant { result_type_id: operands[0], result_id: operands[1], data: operands[2..].to_owned() },
|
||||
27 => Instruction::TypeSampledImage {
|
||||
result_id: operands[0],
|
||||
image_type_id: operands[1],
|
||||
},
|
||||
28 => Instruction::TypeArray {
|
||||
result_id: operands[0],
|
||||
type_id: operands[1],
|
||||
length_id: operands[2],
|
||||
},
|
||||
29 => Instruction::TypeRuntimeArray {
|
||||
result_id: operands[0],
|
||||
type_id: operands[1],
|
||||
},
|
||||
30 => Instruction::TypeStruct {
|
||||
result_id: operands[0],
|
||||
member_types: operands[1 ..].to_owned(),
|
||||
},
|
||||
31 => Instruction::TypeOpaque {
|
||||
result_id: operands[0],
|
||||
name: parse_string(&operands[1 ..]).0,
|
||||
},
|
||||
32 => Instruction::TypePointer {
|
||||
result_id: operands[0],
|
||||
storage_class: StorageClass::from_num(operands[1])?,
|
||||
type_id: operands[2],
|
||||
},
|
||||
43 => Instruction::Constant {
|
||||
result_type_id: operands[0],
|
||||
result_id: operands[1],
|
||||
data: operands[2 ..].to_owned(),
|
||||
},
|
||||
56 => Instruction::FunctionEnd,
|
||||
59 => Instruction::Variable {
|
||||
result_type_id: operands[0], result_id: operands[1],
|
||||
storage_class: try!(StorageClass::from_num(operands[2])),
|
||||
initializer: operands.get(3).map(|&v| v)
|
||||
result_type_id: operands[0],
|
||||
result_id: operands[1],
|
||||
storage_class: StorageClass::from_num(operands[2])?,
|
||||
initializer: operands.get(3).map(|&v| v),
|
||||
},
|
||||
71 => Instruction::Decorate {
|
||||
target_id: operands[0],
|
||||
decoration: Decoration::from_num(operands[1])?,
|
||||
params: operands[2 ..].to_owned(),
|
||||
},
|
||||
72 => Instruction::MemberDecorate {
|
||||
target_id: operands[0],
|
||||
member: operands[1],
|
||||
decoration: Decoration::from_num(operands[2])?,
|
||||
params: operands[3 ..].to_owned(),
|
||||
},
|
||||
71 => Instruction::Decorate { target_id: operands[0], decoration: try!(Decoration::from_num(operands[1])), params: operands[2..].to_owned() },
|
||||
72 => Instruction::MemberDecorate { target_id: operands[0], member: operands[1], decoration: try!(Decoration::from_num(operands[2])), params: operands[3..].to_owned() },
|
||||
248 => Instruction::Label { result_id: operands[0] },
|
||||
249 => Instruction::Branch { result_id: operands[0] },
|
||||
252 => Instruction::Kill,
|
||||
@ -196,13 +333,16 @@ fn decode_instruction(opcode: u16, operands: &[u32]) -> Result<Instruction, Pars
}

fn parse_string(data: &[u32]) -> (String, &[u32]) {
    let bytes = data.iter().flat_map(|&n| {
    let bytes = data.iter()
        .flat_map(|&n| {
            let b1 = (n & 0xff) as u8;
            let b2 = ((n >> 8) & 0xff) as u8;
            let b3 = ((n >> 16) & 0xff) as u8;
            let b4 = ((n >> 24) & 0xff) as u8;
            vec![b1, b2, b3, b4].into_iter()
    }).take_while(|&b| b != 0).collect::<Vec<u8>>();
        })
        .take_while(|&b| b != 0)
        .collect::<Vec<u8>>();

    let r = 1 + bytes.len() / 4;
    let s = String::from_utf8(bytes).expect("Shader content is not UTF-8");
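The packing rule being undone here: each u32 word holds four UTF-8 bytes, least significant byte first, and the string ends at the first zero byte. A compact sketch of the same decoding (assuming a toolchain recent enough for arrays to be iterable by value):

fn decode_spirv_string(words: &[u32]) -> String {
    let bytes: Vec<u8> = words
        .iter()
        .flat_map(|&w| w.to_le_bytes())   // yields b1, b2, b3, b4 in the same order as the shifts above
        .take_while(|&b| b != 0)          // the string is nul-terminated inside the word stream
        .collect();
    String::from_utf8(bytes).expect("string literal is not UTF-8")
}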
@ -9,8 +9,8 @@

use std::mem;

use parse;
use enums;
use parse;

/// Translates all the structs that are contained in the SPIR-V document as Rust structs.
pub fn write_structs(doc: &parse::Spirv) -> String {
@ -18,12 +18,15 @@ pub fn write_structs(doc: &parse::Spirv) -> String {
|
||||
|
||||
for instruction in &doc.instructions {
|
||||
match *instruction {
|
||||
parse::Instruction::TypeStruct { result_id, ref member_types } => {
|
||||
parse::Instruction::TypeStruct {
|
||||
result_id,
|
||||
ref member_types,
|
} => {
let (s, _) = write_struct(doc, result_id, member_types);
result.push_str(&s);
result.push_str("\n");
},
_ => ()
_ => (),
}
}

@ -34,7 +37,7 @@ pub fn write_structs(doc: &parse::Spirv) -> String {
struct Member {
name: String,
value: String,
offset: Option<usize>
offset: Option<usize>,
}

impl Member {
@ -76,31 +79,38 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
}

// Finding offset of the current member, as requested by the SPIR-V code.
let spirv_offset = doc.instructions.iter().filter_map(|i| {
let spirv_offset = doc.instructions
.iter()
.filter_map(|i| {
match *i {
parse::Instruction::MemberDecorate { target_id, member,
parse::Instruction::MemberDecorate {
target_id,
member,
decoration: enums::Decoration::DecorationOffset,
ref params } if target_id == struct_id &&
member as usize == num =>
{
ref params,
} if target_id == struct_id && member as usize == num => {
return Some(params[0]);
},
_ => ()
_ => (),
};

None
}).next();
})
.next();

// Some structs don't have `Offset` decorations, in the case they are used as local
// variables only. Ignoring these.
let spirv_offset = match spirv_offset {
Some(o) => o as usize,
None => return (String::new(), None) // TODO: shouldn't we return and let the caller ignore it instead?
None => return (String::new(), None), // TODO: shouldn't we return and let the caller ignore it instead?
};

// We need to add a dummy field if necessary.
{
let current_rust_offset = current_rust_offset.as_mut().expect("Found runtime-sized member in non-final position");
let current_rust_offset =
current_rust_offset
.as_mut()
.expect("Found runtime-sized member in non-final position");

// Updating current_rust_offset to take the alignment of the next field into account
*current_rust_offset = if *current_rust_offset == 0 {
@ -111,7 +121,8 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,

if spirv_offset != *current_rust_offset {
let diff = spirv_offset.checked_sub(*current_rust_offset).unwrap();
let padding_num = next_padding_num; next_padding_num += 1;
let padding_num = next_padding_num;
next_padding_num += 1;
rust_members.push(Member {
name: format!("_dummy{}", padding_num),
value: format!("[u8; {}]", diff),
@ -136,33 +147,39 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,
}

// Try determine the total size of the struct in order to add padding at the end of the struct.
let spirv_req_total_size = doc.instructions.iter().filter_map(|i| {
match *i {
parse::Instruction::Decorate { target_id,
let spirv_req_total_size = doc.instructions
.iter()
.filter_map(|i| match *i {
parse::Instruction::Decorate {
target_id,
decoration: enums::Decoration::DecorationArrayStride,
ref params } =>
{
ref params,
} => {
for inst in doc.instructions.iter() {
match *inst {
parse::Instruction::TypeArray { result_id, type_id, .. }
if result_id == target_id && type_id == struct_id =>
{
parse::Instruction::TypeArray {
result_id, type_id, ..
} if result_id == target_id && type_id == struct_id => {
return Some(params[0]);
},
parse::Instruction::TypeRuntimeArray { result_id, type_id }
if result_id == target_id && type_id == struct_id =>
{
if result_id == target_id && type_id == struct_id => {
return Some(params[0]);
},
_ => ()
_ => (),
}
}

None
},
_ => None
}
}).fold(None, |a, b| if let Some(a) = a { assert_eq!(a, b); Some(a) } else { Some(b) });
_ => None,
})
.fold(None, |a, b| if let Some(a) = a {
assert_eq!(a, b);
Some(a)
} else {
Some(b)
});

// Adding the final padding members.
if let (Some(cur_size), Some(req_size)) = (current_rust_offset, spirv_req_total_size) {
@ -178,32 +195,50 @@ fn write_struct(doc: &parse::Spirv, struct_id: u32, members: &[u32]) -> (String,

// We can only implement Clone if there's no unsized member in the struct.
let (impl_text, derive_text) = if current_rust_offset.is_some() {
let i = format!("\nimpl Clone for {name} {{\n    fn clone(&self) -> Self {{\n        \
{name} {{\n{copies}\n        }}\n    }}\n}}\n", name = name,
copies = rust_members.iter().map(Member::copy_text).collect::<Vec<_>>().join(",\n"));
let i =
format!("\nimpl Clone for {name} {{\n    fn clone(&self) -> Self {{\n        {name} \
{{\n{copies}\n        }}\n    }}\n}}\n",
name = name,
copies = rust_members
.iter()
.map(Member::copy_text)
.collect::<Vec<_>>()
.join(",\n"));
(i, "#[derive(Copy)]")
} else {
("".to_owned(), "")
};

let s = format!("#[repr(C)]{derive_text}\npub struct {name} {{\n{members}\n}} /* total_size: {t:?} */\n{impl_text}",
let s = format!("#[repr(C)]{derive_text}\npub struct {name} {{\n{members}\n}} /* total_size: \
{t:?} */\n{impl_text}",
name = name,
members = rust_members.iter().map(Member::declaration_text).collect::<Vec<_>>().join(",\n"),
t = spirv_req_total_size, impl_text = impl_text, derive_text = derive_text);
(s, spirv_req_total_size.map(|sz| sz as usize).or(current_rust_offset))
members = rust_members
.iter()
.map(Member::declaration_text)
.collect::<Vec<_>>()
.join(",\n"),
t = spirv_req_total_size,
impl_text = impl_text,
derive_text = derive_text);
(s,
spirv_req_total_size
.map(|sz| sz as usize)
.or(current_rust_offset))
}

/// Returns true if a `BuiltIn` decorator is applied on a struct member.
fn is_builtin_member(doc: &parse::Spirv, id: u32, member_id: u32) -> bool {
for instruction in &doc.instructions {
match *instruction {
parse::Instruction::MemberDecorate { target_id, member,
parse::Instruction::MemberDecorate {
target_id,
member,
decoration: enums::Decoration::DecorationBuiltIn,
.. } if target_id == id && member == member_id =>
{
..
} if target_id == id && member == member_id => {
return true;
},
_ => ()
_ => (),
}
}

@ -217,103 +252,182 @@ pub fn type_from_id(doc: &parse::Spirv, searched: u32) -> (String, Option<usize>
for instruction in doc.instructions.iter() {
match instruction {
&parse::Instruction::TypeBool { result_id } if result_id == searched => {
#[repr(C)] struct Foo { data: bool, after: u8 }
#[repr(C)]
struct Foo {
data: bool,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("bool".to_owned(), Some(size), mem::align_of::<Foo>())
return ("bool".to_owned(), Some(size), mem::align_of::<Foo>());
},
&parse::Instruction::TypeInt { result_id, width, signedness } if result_id == searched => {
&parse::Instruction::TypeInt {
result_id,
width,
signedness,
} if result_id == searched => {
match (width, signedness) {
(8, true) => {
#[repr(C)] struct Foo { data: i8, after: u8 }
#[repr(C)]
struct Foo {
data: i8,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i8".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i8".to_owned(), Some(size), mem::align_of::<Foo>());
},
(8, false) => {
#[repr(C)] struct Foo { data: u8, after: u8 }
#[repr(C)]
struct Foo {
data: u8,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u8".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u8".to_owned(), Some(size), mem::align_of::<Foo>());
},
(16, true) => {
#[repr(C)] struct Foo { data: i16, after: u8 }
#[repr(C)]
struct Foo {
data: i16,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i16".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i16".to_owned(), Some(size), mem::align_of::<Foo>());
},
(16, false) => {
#[repr(C)] struct Foo { data: u16, after: u8 }
#[repr(C)]
struct Foo {
data: u16,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u16".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u16".to_owned(), Some(size), mem::align_of::<Foo>());
},
(32, true) => {
#[repr(C)] struct Foo { data: i32, after: u8 }
#[repr(C)]
struct Foo {
data: i32,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i32".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i32".to_owned(), Some(size), mem::align_of::<Foo>());
},
(32, false) => {
#[repr(C)] struct Foo { data: u32, after: u8 }
#[repr(C)]
struct Foo {
data: u32,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u32".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u32".to_owned(), Some(size), mem::align_of::<Foo>());
},
(64, true) => {
#[repr(C)] struct Foo { data: i64, after: u8 }
#[repr(C)]
struct Foo {
data: i64,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("i64".to_owned(), Some(size), mem::align_of::<Foo>())
return ("i64".to_owned(), Some(size), mem::align_of::<Foo>());
},
(64, false) => {
#[repr(C)] struct Foo { data: u64, after: u8 }
#[repr(C)]
struct Foo {
data: u64,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("u64".to_owned(), Some(size), mem::align_of::<Foo>())
return ("u64".to_owned(), Some(size), mem::align_of::<Foo>());
},
_ => panic!("No Rust equivalent for an integer of width {}", width)
_ => panic!("No Rust equivalent for an integer of width {}", width),
}
},
&parse::Instruction::TypeFloat { result_id, width } if result_id == searched => {
match width {
32 => {
#[repr(C)] struct Foo { data: f32, after: u8 }
#[repr(C)]
struct Foo {
data: f32,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("f32".to_owned(), Some(size), mem::align_of::<Foo>())
return ("f32".to_owned(), Some(size), mem::align_of::<Foo>());
},
64 => {
#[repr(C)] struct Foo { data: f64, after: u8 }
#[repr(C)]
struct Foo {
data: f64,
after: u8,
}
let size = unsafe { (&(&*(0 as *const Foo)).after) as *const u8 as usize };
return ("f64".to_owned(), Some(size), mem::align_of::<Foo>())
return ("f64".to_owned(), Some(size), mem::align_of::<Foo>());
},
_ => panic!("No Rust equivalent for a floating-point of width {}", width)
_ => panic!("No Rust equivalent for a floating-point of width {}", width),
}
},
&parse::Instruction::TypeVector { result_id, component_id, count } if result_id == searched => {
&parse::Instruction::TypeVector {
result_id,
component_id,
count,
} if result_id == searched => {
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, t_size, t_align) = type_from_id(doc, component_id);
return (format!("[{}; {}]", t, count), t_size.map(|s| s * count as usize), t_align);
},
&parse::Instruction::TypeMatrix { result_id, column_type_id, column_count } if result_id == searched => {
&parse::Instruction::TypeMatrix {
result_id,
column_type_id,
column_count,
} if result_id == searched => {
// FIXME: row-major or column-major
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, t_size, t_align) = type_from_id(doc, column_type_id);
return (format!("[{}; {}]", t, column_count), t_size.map(|s| s * column_count as usize), t_align);
return (format!("[{}; {}]", t, column_count),
t_size.map(|s| s * column_count as usize),
t_align);
},
&parse::Instruction::TypeArray { result_id, type_id, length_id } if result_id == searched => {
&parse::Instruction::TypeArray {
result_id,
type_id,
length_id,
} if result_id == searched => {
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, t_size, t_align) = type_from_id(doc, type_id);
let len = doc.instructions.iter().filter_map(|e| {
match e { &parse::Instruction::Constant { result_id, ref data, .. } if result_id == length_id => Some(data.clone()), _ => None }
}).next().expect("failed to find array length");
let len = doc.instructions
.iter()
.filter_map(|e| match e {
&parse::Instruction::Constant {
result_id,
ref data,
..
} if result_id == length_id => Some(data.clone()),
_ => None,
})
.next()
.expect("failed to find array length");
let len = len.iter().rev().fold(0u64, |a, &b| (a << 32) | b as u64);
return (format!("[{}; {}]", t, len), t_size.map(|s| s * len as usize), t_align); // FIXME:
},
&parse::Instruction::TypeRuntimeArray { result_id, type_id } if result_id == searched => {
&parse::Instruction::TypeRuntimeArray { result_id, type_id }
if result_id == searched => {
debug_assert_eq!(mem::align_of::<[u32; 3]>(), mem::align_of::<u32>());
let (t, _, t_align) = type_from_id(doc, type_id);
return (format!("[{}]", t), None, t_align);
},
&parse::Instruction::TypeStruct { result_id, ref member_types } if result_id == searched => {
&parse::Instruction::TypeStruct {
result_id,
ref member_types,
} if result_id == searched => {
// TODO: take the Offset member decorate into account?
let name = ::name_from_id(doc, result_id);
let (_, size) = write_struct(doc, result_id, member_types);
let align = member_types.iter().map(|&t| type_from_id(doc, t).2).max().unwrap_or(1);
let align = member_types
.iter()
.map(|&t| type_from_id(doc, t).2)
.max()
.unwrap_or(1);
return (name, size, align);
},
_ => ()
_ => (),
}
}

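A minimal illustrative sketch (not part of this commit) of the size-probing pattern that `type_from_id` above generates: a `#[repr(C)]` struct places a trailing `u8` right after the measured field, so the byte offset of that trailing field equals the field's padded size. The `Probe` name and the `[f32; 3]` payload here are hypothetical.

// Editorial sketch only; it mirrors the `Foo { data, after }` trick used above.
#[repr(C)]
struct Probe {
    data: [f32; 3], // stand-in payload, e.g. one possible representation of a GLSL vec3
    after: u8,      // sits right after `data`, so its offset equals data's padded size
}

fn main() {
    // Same null-pointer offset computation as in the generated code above.
    let padded = unsafe { (&(&*(0 as *const Probe)).after) as *const u8 as usize };
    assert_eq!(padded, 12); // three f32s; no tail padding is needed for 4-byte alignment
    assert_eq!(std::mem::align_of::<Probe>(), 4);
}
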
@ -22,13 +22,13 @@ use winit::{EventsLoop, WindowBuilder};
use winit::CreationError as WindowCreationError;

#[cfg(target_os = "macos")]
use objc::runtime::{YES};
use cocoa::appkit::{NSView, NSWindow};
#[cfg(target_os = "macos")]
use cocoa::base::id as cocoa_id;
#[cfg(target_os = "macos")]
use cocoa::appkit::{NSWindow, NSView};
#[cfg(target_os = "macos")]
use metal::*;
#[cfg(target_os = "macos")]
use objc::runtime::YES;

#[cfg(target_os = "macos")]
use std::mem;
@ -49,18 +49,20 @@ pub fn required_extensions() -> InstanceExtensions {

match InstanceExtensions::supported_by_core() {
Ok(supported) => supported.intersection(&ideal),
Err(_) => InstanceExtensions::none()
Err(_) => InstanceExtensions::none(),
}
}

pub trait VkSurfaceBuild {
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>) -> Result<Window, CreationError>;
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>)
-> Result<Window, CreationError>;
}

impl VkSurfaceBuild for WindowBuilder {
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>) -> Result<Window, CreationError> {
let window = try!(self.build(events_loop));
let surface = try!(unsafe { winit_to_surface(instance, &window) });
fn build_vk_surface(self, events_loop: &EventsLoop, instance: Arc<Instance>)
-> Result<Window, CreationError> {
let window = self.build(events_loop)?;
let surface = unsafe { winit_to_surface(instance, &window) }?;

Ok(Window {
window: window,
@ -135,16 +137,14 @@ impl From<WindowCreationError> for CreationError {
}

#[cfg(target_os = "android")]
unsafe fn winit_to_surface(instance: Arc<Instance>,
win: &winit::Window)
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::android::WindowExt;
Surface::from_anativewindow(instance, win.get_native_window())
}

#[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
unsafe fn winit_to_surface(instance: Arc<Instance>,
win: &winit::Window)
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::unix::WindowExt;
match (win.get_wayland_display(), win.get_wayland_surface()) {
@ -161,13 +161,12 @@ unsafe fn winit_to_surface(instance: Arc<Instance>,
win.get_xcb_connection().unwrap(),
win.get_xlib_window().unwrap() as _)
}
}
},
}
}

#[cfg(target_os = "windows")]
unsafe fn winit_to_surface(instance: Arc<Instance>,
win: &winit::Window)
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::windows::WindowExt;
Surface::from_hwnd(instance,
@ -177,8 +176,7 @@ unsafe fn winit_to_surface(instance: Arc<Instance>,

#[cfg(target_os = "macos")]
unsafe fn winit_to_surface(instance: Arc<Instance>, win: &winit::Window)
-> Result<Arc<Surface>, SurfaceCreationError>
{
-> Result<Arc<Surface>, SurfaceCreationError> {
use winit::os::macos::WindowExt;

unsafe {

@ -12,7 +12,8 @@ use std::env;
fn main() {
let target = env::var("TARGET").unwrap();
if target.contains("apple-darwin") {
println!("cargo:rustc-link-search=framework={}", "/Library/Frameworks"); // TODO: necessary?
println!("cargo:rustc-link-search=framework={}",
"/Library/Frameworks"); // TODO: necessary?
println!("cargo:rustc-link-lib=c++");
println!("cargo:rustc-link-lib=framework=MoltenVK");
println!("cargo:rustc-link-lib=framework=QuartzCore");

@ -16,6 +16,7 @@
//! You can read the buffer multiple times simultaneously. Trying to read and write simultaneously,
//! or write and write simultaneously will block.

use smallvec::SmallVec;
use std::marker::PhantomData;
use std::mem;
use std::ops::Deref;
@ -26,12 +27,11 @@ use std::sync::RwLock;
use std::sync::RwLockReadGuard;
use std::sync::RwLockWriteGuard;
use std::sync::TryLockError;
use smallvec::SmallVec;

use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
@ -46,15 +46,17 @@ use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::StdMemoryPool;
use sync::AccessError;
use sync::Sharing;
use sync::AccessFlagBits;
use sync::PipelineStages;
use sync::Sharing;

use OomError;

/// Buffer whose content is accessible by the CPU.
#[derive(Debug)]
pub struct CpuAccessibleBuffer<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct CpuAccessibleBuffer<T: ?Sized, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// Inner content.
inner: UnsafeBuffer,

@ -80,21 +82,18 @@ impl<T> CpuAccessibleBuffer<T> {
-> Result<Arc<CpuAccessibleBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
}
unsafe { CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families) }
}

/// Builds a new buffer with some data in it. Only allowed for sized data.
pub fn from_data<'a, I>(device: Arc<Device>, usage: BufferUsage, queue_families: I, data: T)
-> Result<Arc<CpuAccessibleBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>,
T: Content + 'static,
T: Content + 'static
{
unsafe {
let uninitialized = try!(
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
);
let uninitialized =
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)?;

// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
@ -129,9 +128,10 @@ impl<T> CpuAccessibleBuffer<[T]> {
Q: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
let uninitialized = try!(
CpuAccessibleBuffer::uninitialized_array(device, data.len(), usage, queue_families)
);
let uninitialized = CpuAccessibleBuffer::uninitialized_array(device,
data.len(),
usage,
queue_families)?;

// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
@ -157,9 +157,7 @@ impl<T> CpuAccessibleBuffer<[T]> {
-> Result<Arc<CpuAccessibleBuffer<[T]>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuAccessibleBuffer::uninitialized_array(device, len, usage, queue_families)
}
unsafe { CpuAccessibleBuffer::uninitialized_array(device, len, usage, queue_families) }
}

/// Builds a new buffer. Can be used for arrays.
@ -180,11 +178,14 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
///
/// You must ensure that the size that you pass is correct for `T`.
///
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: I)
-> Result<Arc<CpuAccessibleBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id())
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();

let (buffer, mem_reqs) = {
@ -197,21 +198,27 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};

let mem_ty = device.physical_device().memory_types()
let mem_ty = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next().unwrap(); // Vk specs guarantee that this can't fail
.next()
.unwrap(); // Vk specs guarantee that this can't fail

let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;

Ok(Arc::new(CpuAccessibleBuffer {
inner: buffer,
@ -223,7 +230,9 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
}
}

impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where A: MemoryPool {
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>
where A: MemoryPool
{
/// Returns the device used to create this buffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
@ -234,13 +243,22 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where A: MemoryPool {
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families.iter().map(|&num| {
self.device().physical_device().queue_family_by_id(num).unwrap()
}).collect()
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}

impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: MemoryPool {
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>
where T: Content + 'static,
A: MemoryPool
{
/// Locks the buffer in order to write its content.
///
/// If the buffer is currently in use by the GPU, this function will block until either the
@ -251,7 +269,7 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: Memo
/// that uses it will block until you unlock it.
#[inline]
pub fn read(&self) -> Result<ReadLock<T>, TryLockError<RwLockReadGuard<()>>> {
let lock = try!(self.access.try_read());
let lock = self.access.try_read()?;

let offset = self.memory.offset();
let range = offset .. offset + self.inner.size();
@ -272,7 +290,7 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: Memo
/// that uses it will block until you unlock it.
#[inline]
pub fn write(&self) -> Result<WriteLock<T>, TryLockError<RwLockWriteGuard<()>>> {
let lock = try!(self.access.try_write());
let lock = self.access.try_write()?;

let offset = self.memory.offset();
let range = offset .. offset + self.inner.size();
@ -285,7 +303,8 @@ impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> where T: Content + 'static, A: Memo
}

unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
where T: 'static + Send + Sync, A: MemoryPool
where T: 'static + Send + Sync,
A: MemoryPool
{
#[inline]
fn inner(&self) -> BufferInner {
@ -317,7 +336,8 @@ unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
}

unsafe impl<T: ?Sized, A> TypedBufferAccess for CpuAccessibleBuffer<T, A>
where T: 'static + Send + Sync, A: MemoryPool
where T: 'static + Send + Sync,
A: MemoryPool
{
type Content = T;
}
@ -417,7 +437,7 @@ impl<'a, T: ?Sized + 'a> DerefMut for WriteLock<'a, T> {

#[cfg(test)]
mod tests {
use buffer::{CpuAccessibleBuffer, BufferUsage};
use buffer::{BufferUsage, CpuAccessibleBuffer};

#[test]
fn create_empty_buffer() {
@ -425,6 +445,9 @@ mod tests {

const EMPTY: [i32; 0] = [];

let _ = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), Some(queue.family()), EMPTY.iter());
let _ = CpuAccessibleBuffer::from_data(device,
BufferUsage::all(),
Some(queue.family()),
EMPTY.iter());
}
}

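A minimal usage sketch (not part of this commit) of the `CpuAccessibleBuffer` locking behaviour documented above. The `device` and `queue` values are assumed to come from the usual vulkano instance/device setup (the tests above obtain them through `gfx_dev_and_queue!`).

// Sketch only: `device` and `queue` are assumed to exist already.
let buf = CpuAccessibleBuffer::from_data(device.clone(),
                                         BufferUsage::all(),
                                         Some(queue.family()),
                                         5u32).unwrap();
{
    let mut content = buf.write().unwrap(); // exclusive access while this lock is held
    *content = 7;
}
let content = buf.read().unwrap(); // several read locks may coexist once the write lock is dropped
assert_eq!(*content, 7);
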
@ -7,21 +7,21 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use smallvec::SmallVec;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;
use smallvec::SmallVec;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
@ -56,7 +56,9 @@ use OomError;
/// The `CpuBufferPool` struct internally contains an `Arc`. You can clone the `CpuBufferPool` for
/// a cheap cost, and all the clones will share the same underlying buffer.
///
pub struct CpuBufferPool<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct CpuBufferPool<T: ?Sized, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// The device of the pool.
device: Arc<Device>,

@ -80,7 +82,9 @@ pub struct CpuBufferPool<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool
}

// One buffer of the pool.
struct ActualBuffer<A> where A: MemoryPool {
struct ActualBuffer<A>
where A: MemoryPool
{
// Inner content.
inner: UnsafeBuffer,

@ -111,7 +115,9 @@ struct ActualBufferSubbuffer {
/// A subbuffer allocated from a `CpuBufferPool`.
///
/// When this object is destroyed, the subbuffer is automatically reclaimed by the pool.
pub struct CpuBufferPoolSubbuffer<T: ?Sized, A> where A: MemoryPool {
pub struct CpuBufferPoolSubbuffer<T: ?Sized, A>
where A: MemoryPool
{
buffer: Arc<ActualBuffer<A>>,

// Index of the subbuffer within `buffer`.
@ -130,9 +136,7 @@ impl<T> CpuBufferPool<T> {
-> CpuBufferPool<T>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuBufferPool::raw(device, mem::size_of::<T>(), usage, queue_families)
}
unsafe { CpuBufferPool::raw(device, mem::size_of::<T>(), usage, queue_families) }
}

/// Builds a `CpuBufferPool` meant for simple uploads.
@ -151,18 +155,19 @@ impl<T> CpuBufferPool<[T]> {
-> CpuBufferPool<[T]>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
CpuBufferPool::raw(device, mem::size_of::<T>() * len, usage, queue_families)
}
unsafe { CpuBufferPool::raw(device, mem::size_of::<T>() * len, usage, queue_families) }
}
}

impl<T: ?Sized> CpuBufferPool<T> {
pub unsafe fn raw<'a, I>(device: Arc<Device>, one_size: usize,
usage: BufferUsage, queue_families: I) -> CpuBufferPool<T>
pub unsafe fn raw<'a, I>(device: Arc<Device>, one_size: usize, usage: BufferUsage,
queue_families: I)
-> CpuBufferPool<T>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id())
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();

let pool = Device::standard_pool(&device);
@ -187,7 +192,9 @@ impl<T: ?Sized> CpuBufferPool<T> {
}
}

impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
impl<T, A> CpuBufferPool<T, A>
where A: MemoryPool
{
/// Sets the capacity to `capacity`, or does nothing if the capacity is already higher.
///
/// Since this can involve a memory allocation, an `OomError` can happen.
@ -197,9 +204,9 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
// Check current capacity.
match *cur_buf {
Some(ref buf) if buf.capacity >= capacity => {
return Ok(())
return Ok(());
},
_ => ()
_ => (),
};

self.reset_buf(&mut cur_buf, capacity)
@ -229,7 +236,7 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {

match self.try_next_impl(&mut mutex, data) {
Ok(n) => n,
Err(_) => unreachable!()
Err(_) => unreachable!(),
}
}

@ -246,7 +253,9 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
}

// Creates a new buffer and sets it as current.
fn reset_buf(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>, capacity: usize) -> Result<(), OomError> {
fn reset_buf(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>,
capacity: usize)
-> Result<(), OomError> {
unsafe {
let (buffer, mem_reqs) = {
let sharing = if self.queue_families.len() >= 2 {
@ -260,26 +269,37 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
None => return Err(OomError::OutOfDeviceMemory),
};

match UnsafeBuffer::new(self.device.clone(), total_size, self.usage, sharing, SparseLevel::none()) {
match UnsafeBuffer::new(self.device.clone(),
total_size,
self.usage,
sharing,
SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};

let mem_ty = self.device.physical_device().memory_types()
let mem_ty = self.device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next().unwrap(); // Vk specs guarantee that this can't fail
.next()
.unwrap(); // Vk specs guarantee that this can't fail

let mem = try!(MemoryPool::alloc(&self.pool, mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&self.pool,
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;

**cur_buf_mutex = Some(Arc::new(ActualBuffer {
**cur_buf_mutex =
Some(Arc::new(ActualBuffer {
inner: buffer,
memory: mem,
subbuffers: {
@ -302,12 +322,11 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {

// Tries to lock a subbuffer from the current buffer.
fn try_next_impl(&self, cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>, data: T)
-> Result<CpuBufferPoolSubbuffer<T, A>, T>
{
-> Result<CpuBufferPoolSubbuffer<T, A>, T> {
// Grab the current buffer. Return `Err` if the pool wasn't "initialized" yet.
let current_buffer = match cur_buf_mutex.clone() {
Some(b) => b,
None => return Err(data)
None => return Err(data),
};

// Grab the next subbuffer to use.
@ -315,24 +334,35 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
// Since the only place that touches `next_subbuffer` is this code, and since we own a
// mutex lock to the buffer, it means that `next_subbuffer` can't be accessed
// concurrently.
let val = current_buffer.next_subbuffer.fetch_add(1, Ordering::Relaxed);
let val = current_buffer
.next_subbuffer
.fetch_add(1, Ordering::Relaxed);
// TODO: handle overflows?
// TODO: rewrite this in a proper way by holding an intermediary struct in the mutex instead of the Arc directly
val % current_buffer.capacity
};

// Check if subbuffer is already taken. If so, the pool is full.
if current_buffer.subbuffers[next_subbuffer].num_cpu_accesses.compare_and_swap(0, 1, Ordering::SeqCst) != 0 {
if current_buffer.subbuffers[next_subbuffer]
.num_cpu_accesses
.compare_and_swap(0, 1, Ordering::SeqCst) != 0
{
return Err(data);
}

// Reset num_gpu_accesses.
current_buffer.subbuffers[next_subbuffer].num_gpu_accesses.store(0, Ordering::SeqCst);
current_buffer.subbuffers[next_subbuffer]
.num_gpu_accesses
.store(0, Ordering::SeqCst);

// Write `data` in the memory.
unsafe {
let range = (next_subbuffer * self.one_size) .. ((next_subbuffer + 1) * self.one_size);
let mut mapping = current_buffer.memory.mapped_memory().unwrap().read_write(range);
let mut mapping = current_buffer
.memory
.mapped_memory()
.unwrap()
.read_write(range);
*mapping = data;
}

@ -346,7 +376,9 @@ impl<T, A> CpuBufferPool<T, A> where A: MemoryPool {
}

// Can't automatically derive `Clone`, otherwise the compiler adds a `T: Clone` requirement.
impl<T: ?Sized, A> Clone for CpuBufferPool<T, A> where A: MemoryPool + Clone {
impl<T: ?Sized, A> Clone for CpuBufferPool<T, A>
where A: MemoryPool + Clone
{
fn clone(&self) -> Self {
let buf = self.current_buffer.lock().unwrap();

@ -371,9 +403,13 @@ unsafe impl<T: ?Sized, A> DeviceOwned for CpuBufferPool<T, A>
}
}

impl<T: ?Sized, A> Clone for CpuBufferPoolSubbuffer<T, A> where A: MemoryPool {
impl<T: ?Sized, A> Clone for CpuBufferPoolSubbuffer<T, A>
where A: MemoryPool
{
fn clone(&self) -> CpuBufferPoolSubbuffer<T, A> {
let old_val = self.buffer.subbuffers[self.subbuffer_index].num_cpu_accesses.fetch_add(1, Ordering::SeqCst);
let old_val = self.buffer.subbuffers[self.subbuffer_index]
.num_cpu_accesses
.fetch_add(1, Ordering::SeqCst);
debug_assert!(old_val >= 1);

CpuBufferPoolSubbuffer {
@ -458,8 +494,8 @@ unsafe impl<T: ?Sized, A> DeviceOwned for CpuBufferPoolSubbuffer<T, A>

#[cfg(test)]
mod tests {
use std::mem;
use buffer::CpuBufferPool;
use std::mem;

#[test]
fn basic_create() {

@ -13,16 +13,16 @@
//! You can read the buffer multiple times simultaneously from multiple queues. Trying to read and
//! write simultaneously, or write and write simultaneously will block with a semaphore.

use smallvec::SmallVec;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use smallvec::SmallVec;

use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
@ -42,7 +42,9 @@ use SafeDeref;

/// Buffer whose content is accessible by the CPU.
#[derive(Debug)]
pub struct DeviceLocalBuffer<T: ?Sized, A = Arc<StdMemoryPool>> where A: MemoryPool {
pub struct DeviceLocalBuffer<T: ?Sized, A = Arc<StdMemoryPool>>
where A: MemoryPool
{
// Inner content.
inner: UnsafeBuffer,

@ -66,9 +68,7 @@ impl<T> DeviceLocalBuffer<T> {
-> Result<Arc<DeviceLocalBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
DeviceLocalBuffer::raw(device, mem::size_of::<T>(), usage, queue_families)
}
unsafe { DeviceLocalBuffer::raw(device, mem::size_of::<T>(), usage, queue_families) }
}
}

@ -79,9 +79,7 @@ impl<T> DeviceLocalBuffer<[T]> {
-> Result<Arc<DeviceLocalBuffer<[T]>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
unsafe {
DeviceLocalBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families)
}
unsafe { DeviceLocalBuffer::raw(device, len * mem::size_of::<T>(), usage, queue_families) }
}
}

@ -92,11 +90,14 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
///
/// You must ensure that the size that you pass is correct for `T`.
///
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: I)
-> Result<Arc<DeviceLocalBuffer<T>>, OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families.into_iter().map(|f| f.id())
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();

let (buffer, mem_reqs) = {
@ -109,24 +110,31 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};

let mem_ty = {
let device_local = device.physical_device().memory_types()
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};

let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;

Ok(Arc::new(DeviceLocalBuffer {
inner: buffer,
@ -138,7 +146,9 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
}
}

impl<T: ?Sized, A> DeviceLocalBuffer<T, A> where A: MemoryPool {
impl<T: ?Sized, A> DeviceLocalBuffer<T, A>
where A: MemoryPool
{
/// Returns the device used to create this buffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
@ -149,9 +159,15 @@ impl<T: ?Sized, A> DeviceLocalBuffer<T, A> where A: MemoryPool {
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families.iter().map(|&num| {
self.device().physical_device().queue_family_by_id(num).unwrap()
}).collect()
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}

@ -18,24 +18,24 @@
//! The buffer will be stored in device-local memory if possible
//!

use smallvec::SmallVec;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use smallvec::SmallVec;

use buffer::BufferUsage;
use buffer::CpuAccessibleBuffer;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::BufferUsage;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::AutoCommandBuffer;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferExecFuture;
use device::Device;
@ -86,13 +86,16 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
pub fn from_data<'a, I>(data: T, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
pub fn from_data<'a, I>(
data: T, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>,
T: 'static + Send + Sync + Sized,
T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_data(queue.device().clone(), BufferUsage::transfer_source(),
iter::once(queue.family()), data)?;
let source = CpuAccessibleBuffer::from_data(queue.device().clone(),
BufferUsage::transfer_source(),
iter::once(queue.family()),
data)?;
ImmutableBuffer::from_buffer(source, usage, queue_families, queue)
}

@ -102,11 +105,12 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
pub fn from_buffer<'a, B, I>(source: B, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
pub fn from_buffer<'a, B, I>(
source: B, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), OomError>
where B: BufferAccess + TypedBufferAccess<Content = T> + 'static + Clone + Send + Sync,
I: IntoIterator<Item = QueueFamily<'a>>,
T: 'static + Send + Sync,
T: 'static + Send + Sync
{
unsafe {
// We automatically set `transfer_dest` to true in order to avoid annoying errors.
@ -115,8 +119,10 @@ impl<T: ?Sized> ImmutableBuffer<T> {
..usage
};

let (buffer, init) = ImmutableBuffer::raw(source.device().clone(), source.size(),
actual_usage, queue_families)?;
let (buffer, init) = ImmutableBuffer::raw(source.device().clone(),
source.size(),
actual_usage,
queue_families)?;

let cb = AutoCommandBufferBuilder::new(source.device().clone(), queue.family())?
.copy_buffer(source, init).unwrap() // TODO: return error?
@ -124,7 +130,7 @@ impl<T: ?Sized> ImmutableBuffer<T> {

let future = match cb.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!()
Err(_) => unreachable!(),
};

Ok((buffer, future))
@ -150,7 +156,8 @@ impl<T> ImmutableBuffer<T> {
/// data, otherwise the content is undefined.
///
#[inline]
pub unsafe fn uninitialized<'a, I>(device: Arc<Device>, usage: BufferUsage, queue_families: I)
pub unsafe fn uninitialized<'a, I>(
device: Arc<Device>, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
@ -159,14 +166,17 @@ impl<T> ImmutableBuffer<T> {
}

impl<T> ImmutableBuffer<[T]> {
pub fn from_iter<'a, D, I>(data: D, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
pub fn from_iter<'a, D, I>(
data: D, usage: BufferUsage, queue_families: I, queue: Arc<Queue>)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>,
D: ExactSizeIterator<Item = T>,
T: 'static + Send + Sync + Sized,
T: 'static + Send + Sync + Sized
{
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(), BufferUsage::transfer_source(),
iter::once(queue.family()), data)?;
let source = CpuAccessibleBuffer::from_iter(queue.device().clone(),
BufferUsage::transfer_source(),
iter::once(queue.family()),
data)?;
ImmutableBuffer::from_buffer(source, usage, queue_families, queue)
}

@ -187,8 +197,8 @@ impl<T> ImmutableBuffer<[T]> {
/// data, otherwise the content is undefined.
///
#[inline]
pub unsafe fn uninitialized_array<'a, I>(device: Arc<Device>, len: usize, usage: BufferUsage,
queue_families: I)
pub unsafe fn uninitialized_array<'a, I>(
device: Arc<Device>, len: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferInitialization<[T]>), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
@ -213,7 +223,8 @@ impl<T: ?Sized> ImmutableBuffer<T> {
/// data.
///
#[inline]
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
pub unsafe fn raw<'a, I>(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: I)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
@ -223,10 +234,9 @@ impl<T: ?Sized> ImmutableBuffer<T> {

// Internal implementation of `raw`. This is separated from `raw` so that it doesn't need to be
// inlined.
unsafe fn raw_impl(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: SmallVec<[u32; 4]>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError>
{
unsafe fn raw_impl(
device: Arc<Device>, size: usize, usage: BufferUsage, queue_families: SmallVec<[u32; 4]>)
-> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), OomError> {
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
@ -237,24 +247,31 @@ impl<T: ?Sized> ImmutableBuffer<T> {
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::OomError(err)) => return Err(err),
Err(_) => unreachable!() // We don't use sparse binding, therefore the other
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};

let mem_ty = {
let device_local = device.physical_device().memory_types()
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device.physical_device().memory_types()
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};

let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
mem_reqs.size, mem_reqs.alignment, AllocLayout::Linear));
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
try!(buffer.bind_memory(mem.memory(), mem.offset()));
buffer.bind_memory(mem.memory(), mem.offset())?;

let final_buf = Arc::new(ImmutableBuffer {
inner: buffer,
@ -284,9 +301,15 @@ impl<T: ?Sized, A> ImmutableBuffer<T, A> {
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families.iter().map(|&num| {
self.device().physical_device().queue_family_by_id(num).unwrap()
}).collect()
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}

@ -402,30 +425,40 @@ impl<T: ?Sized, A> Clone for ImmutableBufferInitialization<T, A> {

#[cfg(test)]
mod tests {
use std::iter;
use buffer::BufferUsage;
use buffer::cpu_access::CpuAccessibleBuffer;
use buffer::immutable::ImmutableBuffer;
use buffer::BufferUsage;
use command_buffer::AutoCommandBufferBuilder;
use command_buffer::CommandBuffer;
use std::iter;
use sync::GpuFuture;

#[test]
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();

let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(),
let (buffer, _) = ImmutableBuffer::from_data(12u32,
BufferUsage::all(),
iter::once(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();

let dest = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let dest = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();

let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(buffer, dest.clone()).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, dest.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();

let dest_content = dest.read().unwrap();
assert_eq!(*dest_content, 12);
@ -435,19 +468,28 @@ mod tests {
fn from_iter_working() {
let (device, queue) = gfx_dev_and_queue!();

let (buffer, _) = ImmutableBuffer::from_iter((0 .. 512u32).map(|n| n * 2), BufferUsage::all(),
let (buffer, _) = ImmutableBuffer::from_iter((0 .. 512u32).map(|n| n * 2),
BufferUsage::all(),
iter::once(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();

let dest = CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(),
let dest = CpuAccessibleBuffer::from_iter(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
(0 .. 512).map(|_| 0u32)).unwrap();
(0 .. 512).map(|_| 0u32))
.unwrap();

let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(buffer, dest.clone()).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, dest.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();

let dest_content = dest.read().unwrap();
for (n, &v) in dest_content.iter().enumerate() {
@ -460,15 +502,22 @@ mod tests {
fn writing_forbidden() {
let (device, queue) = gfx_dev_and_queue!();

let (buffer, _) = ImmutableBuffer::from_data(12u32, BufferUsage::all(),
let (buffer, _) = ImmutableBuffer::from_data(12u32,
BufferUsage::all(),
iter::once(queue.family()),
queue.clone()).unwrap();
queue.clone())
.unwrap();

let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.fill_buffer(buffer, 50).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.fill_buffer(buffer, 50)
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}

#[test]
@ -477,18 +526,28 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();

let (buffer, _) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all(),
iter::once(queue.family())).unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all(),
iter::once(queue.family()))
.unwrap()
};

let src = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let src = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();

let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(src, buffer).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(src, buffer)
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}

#[test]
@ -496,19 +555,30 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();

let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all(),
iter::once(queue.family())).unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all(),
iter::once(queue.family()))
.unwrap()
};

let src = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let src = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();

let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(src.clone(), init).unwrap()
.copy_buffer(buffer, src.clone()).unwrap()
.build().unwrap()
.execute(queue.clone()).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(src.clone(), init)
.unwrap()
.copy_buffer(buffer, src.clone())
.unwrap()
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}

#[test]
@ -517,31 +587,46 @@ mod tests {
let (device, queue) = gfx_dev_and_queue!();

let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all(),
iter::once(queue.family())).unwrap()
ImmutableBuffer::<u32>::uninitialized(device.clone(),
BufferUsage::all(),
iter::once(queue.family()))
.unwrap()
};

let src = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
iter::once(queue.family()), 0).unwrap();
let src = CpuAccessibleBuffer::from_data(device.clone(),
BufferUsage::all(),
iter::once(queue.family()),
0)
.unwrap();

let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(src.clone(), init).unwrap()
.build().unwrap();
let cb1 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(src.clone(), init)
.unwrap()
.build()
.unwrap();

let cb2 = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
.copy_buffer(buffer, src.clone()).unwrap()
.build().unwrap();
let cb2 = AutoCommandBufferBuilder::new(device.clone(), queue.family())
.unwrap()
.copy_buffer(buffer, src.clone())
.unwrap()
.build()
.unwrap();

let _ = cb1.execute(queue.clone()).unwrap()
.then_execute(queue.clone(), cb2).unwrap()
.then_signal_fence_and_flush().unwrap();
let _ = cb1.execute(queue.clone())
.unwrap()
.then_execute(queue.clone(), cb2)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}

#[test]
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();

let _ = ImmutableBuffer::from_data((), BufferUsage::all(), Some(queue.family()), queue.clone());
let _ =
ImmutableBuffer::from_data((), BufferUsage::all(), Some(queue.family()), queue.clone());
}

// TODO: write tons of tests that try to exploit loopholes

@ -114,8 +114,7 @@ impl<T: ?Sized, B> BufferSlice<T, B> {
|
||||
/// panic.
|
||||
#[inline]
|
||||
pub unsafe fn slice_custom<F, R: ?Sized>(self, f: F) -> BufferSlice<R, B>
|
||||
where F: for<'r> FnOnce(&'r T) -> &'r R
|
||||
// TODO: bounds on R
|
||||
where F: for<'r> FnOnce(&'r T) -> &'r R // TODO: bounds on R
|
||||
{
|
||||
let data: &T = mem::zeroed();
|
||||
let result = f(data);
|
||||
@ -147,7 +146,9 @@ impl<T, B> BufferSlice<[T], B> {
|
||||
/// Returns `None` if out of range.
|
||||
#[inline]
|
||||
pub fn index(self, index: usize) -> Option<BufferSlice<T, B>> {
|
||||
if index >= self.len() { return None; }
|
||||
if index >= self.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(BufferSlice {
|
||||
marker: PhantomData,
|
||||
@ -162,7 +163,9 @@ impl<T, B> BufferSlice<[T], B> {
|
||||
/// Returns `None` if out of range.
|
||||
#[inline]
|
||||
pub fn slice(self, range: Range<usize>) -> Option<BufferSlice<[T], B>> {
|
||||
if range.end > self.len() { return None; }
|
||||
if range.end > self.len() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(BufferSlice {
|
||||
marker: PhantomData,
|
||||
@ -173,7 +176,9 @@ impl<T, B> BufferSlice<[T], B> {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B> where B: BufferAccess {
|
||||
unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> BufferInner {
|
||||
let inner = self.resource.inner();
|
||||
@ -189,13 +194,14 @@ unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B> where B: BufferAcce
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn conflicts_buffer(&self, self_offset: usize, self_size: usize,
|
||||
other: &BufferAccess, other_offset: usize, other_size: usize) -> bool
|
||||
{
|
||||
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
|
||||
other_offset: usize, other_size: usize)
|
||||
-> bool {
|
||||
let self_offset = self.offset + self_offset;
|
||||
// FIXME: spurious failures ; needs investigation
|
||||
//debug_assert!(self_size + self_offset <= self.size);
|
||||
self.resource.conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
|
||||
self.resource
|
||||
.conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@ -222,7 +228,9 @@ unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B> where B: BufferAcce
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B> where B: BufferAccess, {
|
||||
unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
type Content = T;
|
||||
}
|
||||
|
||||
|
@ -24,12 +24,12 @@
|
||||
//! sparse binding.
|
||||
//! - Type safety.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use buffer::BufferUsage;
|
||||
use buffer::usage::usage_to_bits;
|
||||
@ -39,10 +39,10 @@ use memory::DeviceMemory;
|
||||
use memory::MemoryRequirements;
|
||||
use sync::Sharing;
|
||||
|
||||
use check_errors;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
/// Data storage in a GPU-accessible location.
|
||||
@ -81,10 +81,10 @@ impl UnsafeBuffer {
|
||||
let usage_bits = usage_to_bits(usage);
|
||||
|
||||
// Checking sparse features.
|
||||
assert!(sparse.sparse || !sparse.sparse_residency, "Can't enable sparse residency without \
|
||||
enabling sparse binding as well");
|
||||
assert!(sparse.sparse || !sparse.sparse_aliased, "Can't enable sparse aliasing without \
|
||||
enabling sparse binding as well");
|
||||
assert!(sparse.sparse || !sparse.sparse_residency,
|
||||
"Can't enable sparse residency without enabling sparse binding as well");
|
||||
assert!(sparse.sparse || !sparse.sparse_aliased,
|
||||
"Can't enable sparse aliasing without enabling sparse binding as well");
|
||||
if sparse.sparse && !device.enabled_features().sparse_binding {
|
||||
return Err(BufferCreationError::SparseBindingFeatureNotEnabled);
|
||||
}
|
||||
@ -113,13 +113,18 @@ impl UnsafeBuffer {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateBuffer(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateBuffer(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
let mem_reqs = {
|
||||
#[inline] fn align(val: usize, al: usize) -> usize { al * (1 + (val - 1) / al) }
|
||||
#[inline]
|
||||
fn align(val: usize, al: usize) -> usize {
|
||||
al * (1 + (val - 1) / al)
|
||||
}
|
||||
|
||||
let mut output: vk::MemoryRequirements = mem::uninitialized();
|
||||
vk.GetBufferMemoryRequirements(device.internal_object(), buffer, &mut output);
|
||||
@ -158,15 +163,14 @@ impl UnsafeBuffer {
|
||||
Ok((obj, mem_reqs))
|
||||
}
|
||||
|
||||
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize)
|
||||
-> Result<(), OomError>
|
||||
{
|
||||
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize) -> Result<(), OomError> {
|
||||
let vk = self.device.pointers();
|
||||
|
||||
// We check for correctness in debug mode.
|
||||
debug_assert!({
|
||||
let mut mem_reqs = mem::uninitialized();
|
||||
vk.GetBufferMemoryRequirements(self.device.internal_object(), self.buffer,
|
||||
vk.GetBufferMemoryRequirements(self.device.internal_object(),
|
||||
self.buffer,
|
||||
&mut mem_reqs);
|
||||
mem_reqs.size <= (memory.size() - offset) as u64 &&
|
||||
(offset as u64 % mem_reqs.alignment) == 0 &&
|
||||
@ -187,8 +191,10 @@ impl UnsafeBuffer {
|
||||
}
|
||||
}
|
||||
|
||||
try!(check_errors(vk.BindBufferMemory(self.device.internal_object(), self.buffer,
|
||||
memory.internal_object(), offset as vk::DeviceSize)));
|
||||
check_errors(vk.BindBufferMemory(self.device.internal_object(),
|
||||
self.buffer,
|
||||
memory.internal_object(),
|
||||
offset as vk::DeviceSize))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -303,9 +309,15 @@ impl SparseLevel {
|
||||
#[inline]
|
||||
fn to_flags(&self) -> vk::BufferCreateFlagBits {
|
||||
let mut result = 0;
|
||||
if self.sparse { result |= vk::BUFFER_CREATE_SPARSE_BINDING_BIT; }
|
||||
if self.sparse_residency { result |= vk::BUFFER_CREATE_SPARSE_RESIDENCY_BIT; }
|
||||
if self.sparse_aliased { result |= vk::BUFFER_CREATE_SPARSE_ALIASED_BIT; }
|
||||
if self.sparse {
|
||||
result |= vk::BUFFER_CREATE_SPARSE_BINDING_BIT;
|
||||
}
|
||||
if self.sparse_residency {
|
||||
result |= vk::BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
|
||||
}
|
||||
if self.sparse_aliased {
|
||||
result |= vk::BUFFER_CREATE_SPARSE_ALIASED_BIT;
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
@ -344,7 +356,7 @@ impl error::Error for BufferCreationError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
BufferCreationError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -369,7 +381,7 @@ impl From<Error> for BufferCreationError {
|
||||
match err {
|
||||
err @ Error::OutOfHostMemory => BufferCreationError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => BufferCreationError::OomError(OomError::from(err)),
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -379,9 +391,9 @@ mod tests {
|
||||
use std::iter::Empty;
|
||||
|
||||
use super::BufferCreationError;
|
||||
use super::BufferUsage;
|
||||
use super::SparseLevel;
|
||||
use super::UnsafeBuffer;
|
||||
use super::BufferUsage;
|
||||
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
@ -391,7 +403,10 @@ mod tests {
|
||||
fn create() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let (buf, reqs) = unsafe {
|
||||
UnsafeBuffer::new(device.clone(), 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
|
||||
UnsafeBuffer::new(device.clone(),
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
SparseLevel::none())
|
||||
}.unwrap();
|
||||
|
||||
@ -401,23 +416,39 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Can't enable sparse residency without enabling sparse binding as well")]
|
||||
#[should_panic(expected = "Can't enable sparse residency without enabling sparse \
|
||||
binding as well")]
|
||||
fn panic_wrong_sparse_residency() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let sparse = SparseLevel { sparse: false, sparse_residency: true, sparse_aliased: false };
|
||||
let sparse = SparseLevel {
|
||||
sparse: false,
|
||||
sparse_residency: true,
|
||||
sparse_aliased: false,
|
||||
};
|
||||
let _ = unsafe {
|
||||
UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
|
||||
UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
};
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Can't enable sparse aliasing without enabling sparse binding as well")]
|
||||
#[should_panic(expected = "Can't enable sparse aliasing without enabling sparse \
|
||||
binding as well")]
|
||||
fn panic_wrong_sparse_aliased() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let sparse = SparseLevel { sparse: false, sparse_residency: false, sparse_aliased: true };
|
||||
let sparse = SparseLevel {
|
||||
sparse: false,
|
||||
sparse_residency: false,
|
||||
sparse_aliased: true,
|
||||
};
|
||||
let _ = unsafe {
|
||||
UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
|
||||
UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
};
|
||||
}
|
||||
@ -425,13 +456,19 @@ mod tests {
|
||||
#[test]
|
||||
fn missing_feature_sparse_binding() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let sparse = SparseLevel { sparse: true, sparse_residency: false, sparse_aliased: false };
|
||||
let sparse = SparseLevel {
|
||||
sparse: true,
|
||||
sparse_residency: false,
|
||||
sparse_aliased: false,
|
||||
};
|
||||
unsafe {
|
||||
match UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
{
|
||||
match UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse) {
|
||||
Err(BufferCreationError::SparseBindingFeatureNotEnabled) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -439,13 +476,19 @@ mod tests {
|
||||
#[test]
|
||||
fn missing_feature_sparse_residency() {
|
||||
let (device, _) = gfx_dev_and_queue!(sparse_binding);
|
||||
let sparse = SparseLevel { sparse: true, sparse_residency: true, sparse_aliased: false };
|
||||
let sparse = SparseLevel {
|
||||
sparse: true,
|
||||
sparse_residency: true,
|
||||
sparse_aliased: false,
|
||||
};
|
||||
unsafe {
|
||||
match UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
{
|
||||
match UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse) {
|
||||
Err(BufferCreationError::SparseResidencyBufferFeatureNotEnabled) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -453,13 +496,19 @@ mod tests {
|
||||
#[test]
|
||||
fn missing_feature_sparse_aliased() {
|
||||
let (device, _) = gfx_dev_and_queue!(sparse_binding);
|
||||
let sparse = SparseLevel { sparse: true, sparse_residency: false, sparse_aliased: true };
|
||||
let sparse = SparseLevel {
|
||||
sparse: true,
|
||||
sparse_residency: false,
|
||||
sparse_aliased: true,
|
||||
};
|
||||
unsafe {
|
||||
match UnsafeBuffer::new(device, 128, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>,
|
||||
sparse)
|
||||
{
|
||||
match UnsafeBuffer::new(device,
|
||||
128,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
sparse) {
|
||||
Err(BufferCreationError::SparseResidencyAliasedFeatureNotEnabled) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -469,7 +518,11 @@ mod tests {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
unsafe {
|
||||
let _ = UnsafeBuffer::new(device, 0, BufferUsage::all(), Sharing::Exclusive::<Empty<_>>, SparseLevel::none());
|
||||
let _ = UnsafeBuffer::new(device,
|
||||
0,
|
||||
BufferUsage::all(),
|
||||
Sharing::Exclusive::<Empty<_>>,
|
||||
SparseLevel::none());
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -39,7 +39,10 @@ pub unsafe trait BufferAccess: DeviceOwned {
|
||||
///
|
||||
/// This method can only be called for buffers whose type is known to be an array.
|
||||
#[inline]
|
||||
fn len(&self) -> usize where Self: TypedBufferAccess, Self::Content: Content {
|
||||
fn len(&self) -> usize
|
||||
where Self: TypedBufferAccess,
|
||||
Self::Content: Content
|
||||
{
|
||||
self.size() / <Self::Content as Content>::indiv_size()
|
||||
}
|
||||
|
||||
@ -95,10 +98,9 @@ pub unsafe trait BufferAccess: DeviceOwned {
|
||||
///
|
||||
/// If this function returns `false`, this means that we are allowed to access the offset/size
|
||||
/// of `self` at the same time as the offset/size of `other` without causing a data race.
|
||||
fn conflicts_buffer(&self, self_offset: usize, self_size: usize,
|
||||
other: &BufferAccess, other_offset: usize, other_size: usize)
|
||||
-> bool
|
||||
{
|
||||
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
|
||||
other_offset: usize, other_size: usize)
|
||||
-> bool {
|
||||
// TODO: should we really provide a default implementation?
|
||||
|
||||
debug_assert!(self_size <= self.size());
|
||||
@ -129,9 +131,11 @@ pub unsafe trait BufferAccess: DeviceOwned {
|
||||
/// of `self` at the same time as the offset/size of `other` without causing a data race.
|
||||
fn conflicts_image(&self, self_offset: usize, self_size: usize, other: &ImageAccess,
|
||||
other_first_layer: u32, other_num_layers: u32, other_first_mipmap: u32,
|
||||
other_num_mipmaps: u32) -> bool
|
||||
{
|
||||
let other_key = other.conflict_key(other_first_layer, other_num_layers, other_first_mipmap,
|
||||
other_num_mipmaps: u32)
|
||||
-> bool {
|
||||
let other_key = other.conflict_key(other_first_layer,
|
||||
other_num_layers,
|
||||
other_first_mipmap,
|
||||
other_num_mipmaps);
|
||||
self.conflict_key(self_offset, self_size) == other_key
|
||||
}
|
||||
@ -161,7 +165,12 @@ pub unsafe trait BufferAccess: DeviceOwned {
|
||||
/// Shortcut for `conflicts_image` that compares the whole buffer to a whole image.
|
||||
#[inline]
|
||||
fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
|
||||
self.conflicts_image(0, self.size(), other, 0, other.dimensions().array_layers(), 0,
|
||||
self.conflicts_image(0,
|
||||
self.size(),
|
||||
other,
|
||||
0,
|
||||
other.dimensions().array_layers(),
|
||||
0,
|
||||
other.mipmap_levels())
|
||||
}
|
||||
|
||||
@ -214,7 +223,10 @@ pub struct BufferInner<'a> {
|
||||
pub offset: usize,
|
||||
}
|
||||
|
||||
unsafe impl<T> BufferAccess for T where T: SafeDeref, T::Target: BufferAccess {
|
||||
unsafe impl<T> BufferAccess for T
|
||||
where T: SafeDeref,
|
||||
T::Target: BufferAccess
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> BufferInner {
|
||||
(**self).inner()
|
||||
@ -226,9 +238,9 @@ unsafe impl<T> BufferAccess for T where T: SafeDeref, T::Target: BufferAccess {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn conflicts_buffer(&self, self_offset: usize, self_size: usize,
|
||||
other: &BufferAccess, other_offset: usize, other_size: usize) -> bool
|
||||
{
|
||||
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
|
||||
other_offset: usize, other_size: usize)
|
||||
-> bool {
|
||||
(**self).conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
|
||||
}
|
||||
|
||||
@ -259,6 +271,9 @@ pub unsafe trait TypedBufferAccess: BufferAccess {
|
||||
type Content: ?Sized;
|
||||
}
|
||||
|
||||
unsafe impl<T> TypedBufferAccess for T where T: SafeDeref, T::Target: TypedBufferAccess {
|
||||
unsafe impl<T> TypedBufferAccess for T
|
||||
where T: SafeDeref,
|
||||
T::Target: TypedBufferAccess
|
||||
{
|
||||
type Content = <T::Target as TypedBufferAccess>::Content;
|
||||
}
|
||||
|
@ -183,14 +183,32 @@ impl BitOr for BufferUsage {
|
||||
#[inline]
|
||||
pub fn usage_to_bits(usage: BufferUsage) -> vk::BufferUsageFlagBits {
|
||||
let mut result = 0;
|
||||
if usage.transfer_source { result |= vk::BUFFER_USAGE_TRANSFER_SRC_BIT; }
|
||||
if usage.transfer_dest { result |= vk::BUFFER_USAGE_TRANSFER_DST_BIT; }
|
||||
if usage.uniform_texel_buffer { result |= vk::BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; }
|
||||
if usage.storage_texel_buffer { result |= vk::BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; }
|
||||
if usage.uniform_buffer { result |= vk::BUFFER_USAGE_UNIFORM_BUFFER_BIT; }
|
||||
if usage.storage_buffer { result |= vk::BUFFER_USAGE_STORAGE_BUFFER_BIT; }
|
||||
if usage.index_buffer { result |= vk::BUFFER_USAGE_INDEX_BUFFER_BIT; }
|
||||
if usage.vertex_buffer { result |= vk::BUFFER_USAGE_VERTEX_BUFFER_BIT; }
|
||||
if usage.indirect_buffer { result |= vk::BUFFER_USAGE_INDIRECT_BUFFER_BIT; }
|
||||
if usage.transfer_source {
|
||||
result |= vk::BUFFER_USAGE_TRANSFER_SRC_BIT;
|
||||
}
|
||||
if usage.transfer_dest {
|
||||
result |= vk::BUFFER_USAGE_TRANSFER_DST_BIT;
|
||||
}
|
||||
if usage.uniform_texel_buffer {
|
||||
result |= vk::BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
|
||||
}
|
||||
if usage.storage_texel_buffer {
|
||||
result |= vk::BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
|
||||
}
|
||||
if usage.uniform_buffer {
|
||||
result |= vk::BUFFER_USAGE_UNIFORM_BUFFER_BIT;
|
||||
}
|
||||
if usage.storage_buffer {
|
||||
result |= vk::BUFFER_USAGE_STORAGE_BUFFER_BIT;
|
||||
}
|
||||
if usage.index_buffer {
|
||||
result |= vk::BUFFER_USAGE_INDEX_BUFFER_BIT;
|
||||
}
|
||||
if usage.vertex_buffer {
|
||||
result |= vk::BUFFER_USAGE_VERTEX_BUFFER_BIT;
|
||||
}
|
||||
if usage.indirect_buffer {
|
||||
result |= vk::BUFFER_USAGE_INDIRECT_BUFFER_BIT;
|
||||
}
|
||||
result
|
||||
}
|
||||
|
@ -37,9 +37,9 @@
|
||||
//! let _view = BufferView::new(buffer, format::R32Uint).unwrap();
|
||||
//! ```
|
||||
|
||||
use std::marker::PhantomData;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
@ -61,40 +61,42 @@ use vk;
|
||||
|
||||
/// Represents a way for the GPU to interpret buffer data. See the documentation of the
|
||||
/// `view` module.
|
||||
pub struct BufferView<F, B> where B: BufferAccess {
|
||||
pub struct BufferView<F, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
view: vk::BufferView,
|
||||
buffer: B,
|
||||
marker: PhantomData<F>,
|
||||
atomic_accesses: bool,
|
||||
}
|
||||
|
||||
impl<F, B> BufferView<F, B> where B: BufferAccess {
|
||||
impl<F, B> BufferView<F, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
/// Builds a new buffer view.
|
||||
#[inline]
|
||||
pub fn new(buffer: B, format: F) -> Result<BufferView<F, B>, BufferViewCreationError>
|
||||
where B: TypedBufferAccess<Content = [F::Pixel]>,
|
||||
F: StrongStorage + 'static
|
||||
{
|
||||
unsafe {
|
||||
BufferView::unchecked(buffer, format)
|
||||
}
|
||||
unsafe { BufferView::unchecked(buffer, format) }
|
||||
}
|
||||
|
||||
/// Builds a new buffer view from a `BufferAccess` object.
|
||||
#[inline]
|
||||
#[deprecated = "Use new() instead"]
|
||||
pub fn from_access(buffer: B, format: F) -> Result<BufferView<F, B>, BufferViewCreationError>
|
||||
where B: TypedBufferAccess<Content = [F::Pixel]>, F: StrongStorage + 'static
|
||||
where B: TypedBufferAccess<Content = [F::Pixel]>,
|
||||
F: StrongStorage + 'static
|
||||
{
|
||||
unsafe {
|
||||
BufferView::unchecked(buffer, format)
|
||||
}
|
||||
unsafe { BufferView::unchecked(buffer, format) }
|
||||
}
|
||||
|
||||
/// Builds a new buffer view without checking that the format is correct.
|
||||
pub unsafe fn unchecked(org_buffer: B, format: F)
|
||||
-> Result<BufferView<F, B>, BufferViewCreationError>
|
||||
where B: BufferAccess, F: FormatDesc + 'static
|
||||
where B: BufferAccess,
|
||||
F: FormatDesc + 'static
|
||||
{
|
||||
let (view, format_props) = {
|
||||
let size = org_buffer.size();
|
||||
@ -110,8 +112,14 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
|
||||
}
|
||||
|
||||
{
|
||||
let nb = size / format.size().expect("Can't use a compressed format for buffer views");
|
||||
let l = device.physical_device().limits().max_texel_buffer_elements();
|
||||
let nb = size /
|
||||
format
|
||||
.size()
|
||||
.expect("Can't use a compressed format for buffer views");
|
||||
let l = device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_texel_buffer_elements();
|
||||
if nb > l as usize {
|
||||
return Err(BufferViewCreationError::MaxTexelBufferElementsExceeded);
|
||||
}
|
||||
@ -121,7 +129,8 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
|
||||
let vk_i = device.instance().pointers();
|
||||
let mut output = mem::uninitialized();
|
||||
vk_i.GetPhysicalDeviceFormatProperties(device.physical_device().internal_object(),
|
||||
format as u32, &mut output);
|
||||
format as u32,
|
||||
&mut output);
|
||||
output.bufferFeatures
|
||||
};
|
||||
|
||||
@ -149,8 +158,10 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
|
||||
|
||||
let vk = device.pointers();
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateBufferView(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateBufferView(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
(output, format_props)
|
||||
};
|
||||
|
||||
@ -159,7 +170,8 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
|
||||
buffer: org_buffer,
|
||||
marker: PhantomData,
|
||||
atomic_accesses: (format_props &
|
||||
vk::FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) != 0,
|
||||
vk::FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) !=
|
||||
0,
|
||||
})
|
||||
}
|
||||
|
||||
@ -188,7 +200,9 @@ impl<F, B> BufferView<F, B> where B: BufferAccess {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<F, B> VulkanObject for BufferView<F, B> where B: BufferAccess {
|
||||
unsafe impl<F, B> VulkanObject for BufferView<F, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
type Object = vk::BufferView;
|
||||
|
||||
#[inline]
|
||||
@ -206,7 +220,9 @@ unsafe impl<F, B> DeviceOwned for BufferView<F, B>
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, B> fmt::Debug for BufferView<F, B> where B: BufferAccess + fmt::Debug {
|
||||
impl<F, B> fmt::Debug for BufferView<F, B>
|
||||
where B: BufferAccess + fmt::Debug
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
fmt.debug_struct("BufferView")
|
||||
.field("raw", &self.view)
|
||||
@ -215,12 +231,15 @@ impl<F, B> fmt::Debug for BufferView<F, B> where B: BufferAccess + fmt::Debug {
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, B> Drop for BufferView<F, B> where B: BufferAccess {
|
||||
impl<F, B> Drop for BufferView<F, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
let vk = self.buffer.inner().buffer.device().pointers();
|
||||
vk.DestroyBufferView(self.buffer.inner().buffer.device().internal_object(), self.view,
|
||||
vk.DestroyBufferView(self.buffer.inner().buffer.device().internal_object(),
|
||||
self.view,
|
||||
ptr::null());
|
||||
}
|
||||
}
|
||||
@ -233,7 +252,9 @@ pub unsafe trait BufferViewRef {
|
||||
fn view(&self) -> &BufferView<Self::Format, Self::BufferAccess>;
|
||||
}
|
||||
|
||||
unsafe impl<F, B> BufferViewRef for BufferView<F, B> where B: BufferAccess {
|
||||
unsafe impl<F, B> BufferViewRef for BufferView<F, B>
|
||||
where B: BufferAccess
|
||||
{
|
||||
type BufferAccess = B;
|
||||
type Format = F;
|
||||
|
||||
@ -243,7 +264,10 @@ unsafe impl<F, B> BufferViewRef for BufferView<F, B> where B: BufferAccess {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, F, B> BufferViewRef for T where T: SafeDeref<Target = BufferView<F, B>>, B: BufferAccess {
|
||||
unsafe impl<T, F, B> BufferViewRef for T
|
||||
where T: SafeDeref<Target = BufferView<F, B>>,
|
||||
B: BufferAccess
|
||||
{
|
||||
type BufferAccess = B;
|
||||
type Format = F;
|
||||
|
||||
@ -275,10 +299,10 @@ impl error::Error for BufferViewCreationError {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
BufferViewCreationError::OomError(_) => "out of memory when creating buffer view",
|
||||
BufferViewCreationError::WrongBufferUsage => "the buffer is missing correct usage \
|
||||
flags",
|
||||
BufferViewCreationError::UnsupportedFormat => "the requested format is not supported \
|
||||
for this usage",
|
||||
BufferViewCreationError::WrongBufferUsage =>
|
||||
"the buffer is missing correct usage flags",
|
||||
BufferViewCreationError::UnsupportedFormat =>
|
||||
"the requested format is not supported for this usage",
|
||||
BufferViewCreationError::MaxTexelBufferElementsExceeded => {
|
||||
"the maximum number of texel elements is exceeded"
|
||||
},
|
||||
@ -317,10 +341,10 @@ impl From<Error> for BufferViewCreationError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use buffer::BufferView;
|
||||
use buffer::BufferUsage;
|
||||
use buffer::view::BufferViewCreationError;
|
||||
use buffer::BufferView;
|
||||
use buffer::immutable::ImmutableBuffer;
|
||||
use buffer::view::BufferViewCreationError;
|
||||
use format;
|
||||
|
||||
#[test]
|
||||
@ -333,8 +357,11 @@ mod tests {
|
||||
..BufferUsage::none()
|
||||
};
|
||||
|
||||
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), usage,
|
||||
Some(queue.family()), queue.clone()).unwrap();
|
||||
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0 .. 128).map(|_| [0; 4]),
|
||||
usage,
|
||||
Some(queue.family()),
|
||||
queue.clone())
|
||||
.unwrap();
|
||||
let view = BufferView::new(buffer, format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
assert!(view.uniform_texel_buffer());
|
||||
@ -350,9 +377,11 @@ mod tests {
|
||||
..BufferUsage::none()
|
||||
};
|
||||
|
||||
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), usage,
|
||||
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0 .. 128).map(|_| [0; 4]),
|
||||
usage,
|
||||
Some(queue.family()),
|
||||
queue.clone()).unwrap();
|
||||
queue.clone())
|
||||
.unwrap();
|
||||
let view = BufferView::new(buffer, format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
assert!(view.storage_texel_buffer());
|
||||
@ -368,9 +397,11 @@ mod tests {
|
||||
..BufferUsage::none()
|
||||
};
|
||||
|
||||
let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0..128).map(|_| 0), usage,
|
||||
let (buffer, _) = ImmutableBuffer::<[u32]>::from_iter((0 .. 128).map(|_| 0),
|
||||
usage,
|
||||
Some(queue.family()),
|
||||
queue.clone()).unwrap();
|
||||
queue.clone())
|
||||
.unwrap();
|
||||
let view = BufferView::new(buffer, format::R32Uint).unwrap();
|
||||
|
||||
assert!(view.storage_texel_buffer());
|
||||
@ -385,11 +416,12 @@ mod tests {
|
||||
let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter((0 .. 128).map(|_| [0; 4]),
|
||||
BufferUsage::none(),
|
||||
Some(queue.family()),
|
||||
queue.clone()).unwrap();
|
||||
queue.clone())
|
||||
.unwrap();
|
||||
|
||||
match BufferView::new(buffer, format::R8G8B8A8Unorm) {
|
||||
Err(BufferViewCreationError::WrongBufferUsage) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -404,13 +436,15 @@ mod tests {
|
||||
};
|
||||
|
||||
let (buffer, _) = ImmutableBuffer::<[[f64; 4]]>::from_iter((0 .. 128).map(|_| [0.0; 4]),
|
||||
usage, Some(queue.family()),
|
||||
queue.clone()).unwrap();
|
||||
usage,
|
||||
Some(queue.family()),
|
||||
queue.clone())
|
||||
.unwrap();
|
||||
|
||||
// TODO: what if R64G64B64A64Sfloat is supported?
|
||||
match BufferView::new(buffer, format::R64G64B64A64Sfloat) {
|
||||
Err(BufferViewCreationError::UnsupportedFormat) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ use std::mem;
|
||||
use std::slice;
|
||||
use std::sync::Arc;
|
||||
|
||||
use OomError;
|
||||
use buffer::BufferAccess;
|
||||
use buffer::TypedBufferAccess;
|
||||
use command_buffer::CommandBuffer;
|
||||
@ -29,8 +30,8 @@ use command_buffer::pool::standard::StandardCommandPoolAlloc;
|
||||
use command_buffer::pool::standard::StandardCommandPoolBuilder;
|
||||
use command_buffer::synced::SyncCommandBuffer;
|
||||
use command_buffer::synced::SyncCommandBufferBuilder;
|
||||
use command_buffer::synced::SyncCommandBufferBuilderError;
|
||||
use command_buffer::synced::SyncCommandBufferBuilderBindVertexBuffer;
|
||||
use command_buffer::synced::SyncCommandBufferBuilderError;
|
||||
use command_buffer::sys::Flags;
|
||||
use command_buffer::sys::Kind;
|
||||
use command_buffer::sys::UnsafeCommandBuffer;
|
||||
@ -43,10 +44,10 @@ use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use device::Queue;
|
||||
use framebuffer::FramebufferAbstract;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::RenderPassAbstract;
|
||||
use image::ImageLayout;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use image::ImageAccess;
|
||||
use image::ImageLayout;
|
||||
use instance::QueueFamily;
|
||||
use pipeline::ComputePipelineAbstract;
|
||||
use pipeline::GraphicsPipelineAbstract;
|
||||
@ -54,9 +55,8 @@ use pipeline::input_assembly::Index;
|
||||
use pipeline::vertex::VertexSource;
|
||||
use sync::AccessCheckError;
|
||||
use sync::AccessFlagBits;
|
||||
use sync::PipelineStages;
|
||||
use sync::GpuFuture;
|
||||
use OomError;
|
||||
use sync::PipelineStages;
|
||||
|
||||
///
|
||||
///
|
||||
@ -72,8 +72,7 @@ pub struct AutoCommandBufferBuilder<P = StandardCommandPoolBuilder> {
|
||||
|
||||
impl AutoCommandBufferBuilder<StandardCommandPoolBuilder> {
|
||||
pub fn new(device: Arc<Device>, queue_family: QueueFamily)
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError>
|
||||
{
|
||||
-> Result<AutoCommandBufferBuilder<StandardCommandPoolBuilder>, OomError> {
|
||||
unsafe {
|
||||
let pool = Device::standard_command_pool(&device, queue_family);
|
||||
let inner = SyncCommandBufferBuilder::new(&pool, Kind::primary(), Flags::None);
|
||||
@ -94,9 +93,7 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
where P: CommandPoolBuilderAlloc
|
||||
{
|
||||
// TODO: error if we're inside a render pass
|
||||
Ok(AutoCommandBuffer {
|
||||
inner: self.inner.build()?
|
||||
})
|
||||
Ok(AutoCommandBuffer { inner: self.inner.build()? })
|
||||
}
|
||||
|
||||
/// Adds a command that enters a render pass.
|
||||
@ -107,15 +104,15 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
///
|
||||
/// You must call this before you can add draw commands.
|
||||
#[inline]
|
||||
pub fn begin_render_pass<F, C>(mut self, framebuffer: F, secondary: bool,
|
||||
clear_values: C)
|
||||
pub fn begin_render_pass<F, C>(mut self, framebuffer: F, secondary: bool, clear_values: C)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where F: FramebufferAbstract + RenderPassDescClearValues<C> + Send + Sync + 'static
|
||||
{
|
||||
unsafe {
|
||||
let clear_values = framebuffer.convert_clear_values(clear_values);
|
||||
let clear_values = clear_values.collect::<Vec<_>>().into_iter(); // TODO: necessary for Send + Sync ; needs an API rework of convert_clear_values
|
||||
self.inner.begin_render_pass(framebuffer, secondary, clear_values);
|
||||
self.inner
|
||||
.begin_render_pass(framebuffer, secondary, clear_values);
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
@ -125,9 +122,10 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
/// This command will copy from the source to the destination. If their size is not equal, then
|
||||
/// the amount of data copied is equal to the smallest of the two.
|
||||
#[inline]
|
||||
pub fn copy_buffer<S, D>(mut self, src: S, dest: D) -> Result<Self, validity::CheckCopyBufferError>
|
||||
pub fn copy_buffer<S, D>(mut self, src: S, dest: D)
|
||||
-> Result<Self, validity::CheckCopyBufferError>
|
||||
where S: BufferAccess + Send + Sync + 'static,
|
||||
D: BufferAccess + Send + Sync + 'static,
|
||||
D: BufferAccess + Send + Sync + 'static
|
||||
{
|
||||
unsafe {
|
||||
// TODO: check that we're not in a render pass
|
||||
@ -143,19 +141,19 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
pub fn copy_buffer_to_image<S, D>(mut self, src: S, dest: D)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where S: BufferAccess + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static
|
||||
{
|
||||
let dims = dest.dimensions().width_height_depth();
|
||||
self.copy_buffer_to_image_dimensions(src, dest, [0, 0, 0], dims, 0, 1, 0)
|
||||
}
|
||||
|
||||
/// Adds a command that copies from a buffer to an image.
|
||||
pub fn copy_buffer_to_image_dimensions<S, D>(mut self, src: S, dest: D, offset: [u32; 3],
|
||||
size: [u32; 3], first_layer: u32, num_layers: u32,
|
||||
mipmap: u32)
|
||||
pub fn copy_buffer_to_image_dimensions<S, D>(
|
||||
mut self, src: S, dest: D, offset: [u32; 3], size: [u32; 3], first_layer: u32,
|
||||
num_layers: u32, mipmap: u32)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where S: BufferAccess + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static,
|
||||
D: ImageAccess + Send + Sync + 'static
|
||||
{
|
||||
unsafe {
|
||||
// TODO: check that we're not in a render pass
|
||||
@ -167,7 +165,11 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
buffer_row_length: 0,
|
||||
buffer_image_height: 0,
|
||||
image_aspect: if dest.has_color() {
|
||||
UnsafeCommandBufferBuilderImageAspect { color: true, depth: false, stencil: false }
|
||||
UnsafeCommandBufferBuilderImageAspect {
|
||||
color: true,
|
||||
depth: false,
|
||||
stencil: false,
|
||||
}
|
||||
} else {
|
||||
unimplemented!()
|
||||
},
|
||||
@ -189,12 +191,14 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
pub fn dispatch<Cp, S, Pc>(mut self, dimensions: [u32; 3], pipeline: Cp, sets: S, constants: Pc)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where Cp: ComputePipelineAbstract + Send + Sync + 'static + Clone, // TODO: meh for Clone
|
||||
S: DescriptorSetsCollection,
|
||||
S: DescriptorSetsCollection
|
||||
{
|
||||
unsafe {
|
||||
// TODO: missing checks
|
||||
|
||||
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_compute_pipeline(&pipeline) {
|
||||
if let StateCacherOutcome::NeedChange =
|
||||
self.state_cacher.bind_compute_pipeline(&pipeline)
|
||||
{
|
||||
self.inner.bind_pipeline_compute(pipeline.clone());
|
||||
}
|
||||
|
||||
@ -208,43 +212,50 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
#[inline]
|
||||
pub fn draw<V, Gp, S, Pc>(mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, sets: S,
|
||||
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
constants: Pc)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
|
||||
S: DescriptorSetsCollection,
|
||||
S: DescriptorSetsCollection
|
||||
{
|
||||
unsafe {
|
||||
// TODO: missing checks
|
||||
|
||||
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
|
||||
if let StateCacherOutcome::NeedChange =
|
||||
self.state_cacher.bind_graphics_pipeline(&pipeline)
|
||||
{
|
||||
self.inner.bind_pipeline_graphics(pipeline.clone());
|
||||
}
|
||||
|
||||
push_constants(&mut self.inner, pipeline.clone(), constants);
|
||||
set_state(&mut self.inner, dynamic);
|
||||
descriptor_sets(&mut self.inner, true, pipeline.clone(), sets);
|
||||
let (vertex_count, instance_count) = vertex_buffers(&mut self.inner, &pipeline,
|
||||
vertices);
|
||||
let (vertex_count, instance_count) =
|
||||
vertex_buffers(&mut self.inner, &pipeline, vertices);
|
||||
|
||||
self.inner.draw(vertex_count as u32, instance_count as u32, 0, 0);
|
||||
self.inner
|
||||
.draw(vertex_count as u32, instance_count as u32, 0, 0);
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(mut self, pipeline: Gp, dynamic: DynamicState,
|
||||
vertices: V, index_buffer: Ib, sets: S,
|
||||
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
pub fn draw_indexed<V, Gp, S, Pc, Ib, I>(
|
||||
mut self, pipeline: Gp, dynamic: DynamicState, vertices: V, index_buffer: Ib, sets: S,
|
||||
constants: Pc)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
|
||||
S: DescriptorSetsCollection,
|
||||
Ib: BufferAccess + TypedBufferAccess<Content = [I]> + Send + Sync + 'static,
|
||||
I: Index + 'static,
|
||||
I: Index + 'static
|
||||
{
|
||||
unsafe {
|
||||
// TODO: missing checks
|
||||
|
||||
let index_count = index_buffer.len();
|
||||
|
||||
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
|
||||
if let StateCacherOutcome::NeedChange =
|
||||
self.state_cacher.bind_graphics_pipeline(&pipeline)
|
||||
{
|
||||
self.inner.bind_pipeline_graphics(pipeline.clone());
|
||||
}
|
||||
|
||||
@ -262,18 +273,24 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
|
||||
#[inline]
|
||||
pub fn draw_indirect<V, Gp, S, Pc, Ib>(mut self, pipeline: Gp, dynamic: DynamicState,
|
||||
vertices: V, indirect_buffer: Ib, sets: S,
|
||||
constants: Pc) -> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
vertices: V, indirect_buffer: Ib, sets: S, constants: Pc)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
where Gp: GraphicsPipelineAbstract + VertexSource<V> + Send + Sync + 'static + Clone, // TODO: meh for Clone
|
||||
S: DescriptorSetsCollection,
|
||||
Ib: BufferAccess + TypedBufferAccess<Content = [DrawIndirectCommand]> + Send + Sync + 'static,
|
||||
Ib: BufferAccess
|
||||
+ TypedBufferAccess<Content = [DrawIndirectCommand]>
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static
|
||||
{
|
||||
unsafe {
|
||||
// TODO: missing checks
|
||||
|
||||
let draw_count = indirect_buffer.len() as u32;
|
||||
|
||||
if let StateCacherOutcome::NeedChange = self.state_cacher.bind_graphics_pipeline(&pipeline) {
|
||||
if let StateCacherOutcome::NeedChange =
|
||||
self.state_cacher.bind_graphics_pipeline(&pipeline)
|
||||
{
|
||||
self.inner.bind_pipeline_graphics(pipeline.clone());
|
||||
}
|
||||
|
||||
@ -282,7 +299,8 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
descriptor_sets(&mut self.inner, true, pipeline.clone(), sets);
|
||||
vertex_buffers(&mut self.inner, &pipeline, vertices);
|
||||
|
||||
self.inner.draw_indirect(indirect_buffer, draw_count,
|
||||
self.inner.draw_indirect(indirect_buffer,
|
||||
draw_count,
|
||||
mem::size_of::<DrawIndirectCommand>() as u32);
|
||||
Ok(self)
|
||||
}
|
||||
@ -312,8 +330,9 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
/// > this function only for zeroing the content of a buffer by passing `0` for the data.
|
||||
// TODO: not safe because of signalling NaNs
|
||||
#[inline]
|
||||
pub fn fill_buffer<B>(mut self, buffer: B, data: u32) -> Result<Self, validity::CheckFillBufferError>
|
||||
where B: BufferAccess + Send + Sync + 'static,
|
||||
pub fn fill_buffer<B>(mut self, buffer: B, data: u32)
|
||||
-> Result<Self, validity::CheckFillBufferError>
|
||||
where B: BufferAccess + Send + Sync + 'static
|
||||
{
|
||||
unsafe {
|
||||
// TODO: check that we're not in a render pass
|
||||
@ -326,8 +345,7 @@ impl<P> AutoCommandBufferBuilder<P> {
|
||||
/// Adds a command that jumps to the next subpass of the current render pass.
|
||||
#[inline]
|
||||
pub fn next_subpass(mut self, secondary: bool)
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError>
|
||||
{
|
||||
-> Result<Self, AutoCommandBufferBuilderContextError> {
|
||||
unsafe {
|
||||
// TODO: check
|
||||
self.inner.next_subpass(secondary);
|
||||
@ -377,7 +395,7 @@ unsafe fn push_constants<P, Pl, Pc>(dest: &mut SyncCommandBufferBuilder<P>, pipe
|
||||
for num_range in 0 .. pipeline.num_push_constants_ranges() {
|
||||
let range = match pipeline.push_constants_range(num_range) {
|
||||
Some(r) => r,
|
||||
None => continue
|
||||
None => continue,
|
||||
};
|
||||
|
||||
debug_assert_eq!(range.offset % 4, 0);
|
||||
@ -387,8 +405,10 @@ unsafe fn push_constants<P, Pl, Pc>(dest: &mut SyncCommandBufferBuilder<P>, pipe
|
||||
.offset(range.offset as isize),
|
||||
range.size as usize);
|
||||
|
||||
dest.push_constants::<_, [u8]>(pipeline.clone(), range.stages,
|
||||
range.offset as u32, range.size as u32,
|
||||
dest.push_constants::<_, [u8]>(pipeline.clone(),
|
||||
range.stages,
|
||||
range.offset as u32,
|
||||
range.size as u32,
|
||||
data);
|
||||
}
|
||||
}
|
||||
@ -410,8 +430,9 @@ unsafe fn set_state<P>(dest: &mut SyncCommandBufferBuilder<P>, dynamic: DynamicS
|
||||
|
||||
// Shortcut function to bind vertex buffers.
|
||||
unsafe fn vertex_buffers<P, Gp, V>(dest: &mut SyncCommandBufferBuilder<P>, pipeline: &Gp,
|
||||
vertices: V) -> (u32, u32)
|
||||
where Gp: VertexSource<V>,
|
||||
vertices: V)
|
||||
-> (u32, u32)
|
||||
where Gp: VertexSource<V>
|
||||
{
|
||||
let (vertex_buffers, vertex_count, instance_count) = pipeline.decode(vertices);
|
||||
|
||||
@ -451,22 +472,24 @@ unsafe impl<P> CommandBuffer for AutoCommandBuffer<P> {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
|
||||
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue)
|
||||
-> Result<(), CommandBufferExecError> {
|
||||
self.inner.prepare_submit(future, queue)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
fn check_buffer_access(
|
||||
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
self.inner.check_buffer_access(buffer, exclusive, queue)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
self.inner.check_image_access(image, layout, exclusive, queue)
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
|
||||
queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
self.inner
|
||||
.check_image_access(image, layout, exclusive, queue)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -73,8 +73,8 @@
|
||||
//! alternative command pool implementations and use them. See the `pool` module for more
|
||||
//! information.
|
||||
|
||||
pub use self::auto::AutoCommandBufferBuilder;
|
||||
pub use self::auto::AutoCommandBuffer;
|
||||
pub use self::auto::AutoCommandBufferBuilder;
|
||||
pub use self::state_cacher::StateCacher;
|
||||
pub use self::state_cacher::StateCacherOutcome;
|
||||
pub use self::traits::CommandBuffer;
|
||||
@ -82,8 +82,8 @@ pub use self::traits::CommandBufferBuild;
|
||||
pub use self::traits::CommandBufferExecError;
|
||||
pub use self::traits::CommandBufferExecFuture;
|
||||
|
||||
use pipeline::viewport::Viewport;
|
||||
use pipeline::viewport::Scissor;
|
||||
use pipeline::viewport::Viewport;
|
||||
|
||||
pub mod pool;
|
||||
pub mod submit;
|
||||
|
@ -18,14 +18,14 @@
|
||||
|
||||
use instance::QueueFamily;
|
||||
|
||||
use device::DeviceOwned;
|
||||
use OomError;
|
||||
use device::DeviceOwned;
|
||||
|
||||
pub use self::standard::StandardCommandPool;
|
||||
pub use self::sys::CommandPoolTrimError;
|
||||
pub use self::sys::UnsafeCommandPool;
|
||||
pub use self::sys::UnsafeCommandPoolAlloc;
|
||||
pub use self::sys::UnsafeCommandPoolAllocIter;
|
||||
pub use self::sys::CommandPoolTrimError;
|
||||
|
||||
pub mod standard;
|
||||
mod sys;
|
||||
|
@ -7,6 +7,7 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use fnv::FnvHasher;
|
||||
use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::BuildHasherDefault;
|
||||
@ -14,7 +15,6 @@ use std::marker::PhantomData;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
use std::sync::Weak;
|
||||
use fnv::FnvHasher;
|
||||
|
||||
use command_buffer::pool::CommandPool;
|
||||
use command_buffer::pool::CommandPoolAlloc;
|
||||
@ -23,16 +23,18 @@ use command_buffer::pool::UnsafeCommandPool;
|
||||
use command_buffer::pool::UnsafeCommandPoolAlloc;
|
||||
use instance::QueueFamily;
|
||||
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
|
||||
// Since the stdlib doesn't have a "thread ID" yet, we store a `Box<u8>` for each thread and the
|
||||
// value of the pointer will be used as a thread id.
|
||||
thread_local!(static THREAD_ID: Box<u8> = Box::new(0));
|
||||
#[inline]
|
||||
fn curr_thread_id() -> usize { THREAD_ID.with(|data| &**data as *const u8 as usize) }
|
||||
fn curr_thread_id() -> usize {
|
||||
THREAD_ID.with(|data| &**data as *const u8 as usize)
|
||||
}
|
||||
|
||||
/// Standard implementation of a command pool.
|
||||
///
|
||||
@ -47,12 +49,15 @@ pub struct StandardCommandPool {
|
||||
queue_family: u32,
|
||||
|
||||
// For each "thread id" (see `THREAD_ID` above), we store thread-specific info.
|
||||
per_thread: Mutex<HashMap<usize, Weak<Mutex<StandardCommandPoolPerThread>>,
|
||||
per_thread: Mutex<HashMap<usize,
|
||||
Weak<Mutex<StandardCommandPoolPerThread>>,
|
||||
BuildHasherDefault<FnvHasher>>>,
|
||||
}
|
||||
|
||||
unsafe impl Send for StandardCommandPool {}
|
||||
unsafe impl Sync for StandardCommandPool {}
|
||||
unsafe impl Send for StandardCommandPool {
|
||||
}
|
||||
unsafe impl Sync for StandardCommandPool {
|
||||
}
|
||||
|
||||
struct StandardCommandPoolPerThread {
|
||||
// The Vulkan pool of this thread.
|
||||
@ -98,8 +103,8 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
|
||||
let per_thread = match per_thread {
|
||||
Some(pt) => pt,
|
||||
None => {
|
||||
let new_pool = try!(UnsafeCommandPool::new(self.device.clone(), self.queue_family(),
|
||||
false, true));
|
||||
let new_pool =
|
||||
UnsafeCommandPool::new(self.device.clone(), self.queue_family(), false, true)?;
|
||||
let pt = Arc::new(Mutex::new(StandardCommandPoolPerThread {
|
||||
pool: new_pool,
|
||||
available_primary_command_buffers: Vec::new(),
|
||||
@ -116,23 +121,31 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
|
||||
// Build an iterator to pick from already-existing command buffers.
|
||||
let (num_from_existing, from_existing) = {
|
||||
// Which list of already-existing command buffers we are going to pick CBs from.
|
||||
let mut existing = if secondary { &mut pt_lock.available_secondary_command_buffers }
|
||||
else { &mut pt_lock.available_primary_command_buffers };
|
||||
let mut existing = if secondary {
|
||||
&mut pt_lock.available_secondary_command_buffers
|
||||
} else {
|
||||
&mut pt_lock.available_primary_command_buffers
|
||||
};
|
||||
let num_from_existing = cmp::min(count as usize, existing.len());
|
||||
let from_existing = existing.drain(0 .. num_from_existing).collect::<Vec<_>>().into_iter();
|
||||
let from_existing = existing
|
||||
.drain(0 .. num_from_existing)
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter();
|
||||
(num_from_existing, from_existing)
|
||||
};
|
||||
|
||||
// Build an iterator to construct the missing command buffers from the Vulkan pool.
|
||||
let num_new = count as usize - num_from_existing;
|
||||
debug_assert!(num_new <= count as usize); // Check overflows.
|
||||
let newly_allocated = try!(pt_lock.pool.alloc_command_buffers(secondary, num_new));
|
||||
let newly_allocated = pt_lock.pool.alloc_command_buffers(secondary, num_new)?;
|
||||
|
||||
// Returning them as a chain.
|
||||
let device = self.device.clone();
|
||||
let queue_family_id = self.queue_family;
|
||||
let per_thread = per_thread.clone();
|
||||
let final_iter = from_existing.chain(newly_allocated).map(move |cmd| {
|
||||
let final_iter = from_existing
|
||||
.chain(newly_allocated)
|
||||
.map(move |cmd| {
|
||||
StandardCommandPoolBuilder {
|
||||
cmd: Some(cmd),
|
||||
pool: per_thread.clone(),
|
||||
@ -141,14 +154,18 @@ unsafe impl CommandPool for Arc<StandardCommandPool> {
|
||||
queue_family_id: queue_family_id,
|
||||
dummy_avoid_send_sync: PhantomData,
|
||||
}
|
||||
}).collect::<Vec<_>>();
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(Box::new(final_iter.into_iter()))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn queue_family(&self) -> QueueFamily {
|
||||
self.device.physical_device().queue_family_by_id(self.queue_family).unwrap()
|
||||
self.device
|
||||
.physical_device()
|
||||
.queue_family_by_id(self.queue_family)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
@ -189,7 +206,10 @@ unsafe impl CommandPoolBuilderAlloc for StandardCommandPoolBuilder {
|
||||
|
||||
#[inline]
|
||||
fn queue_family(&self) -> QueueFamily {
|
||||
self.device.physical_device().queue_family_by_id(self.queue_family_id).unwrap()
|
||||
self.device
|
||||
.physical_device()
|
||||
.queue_family_by_id(self.queue_family_id)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
@ -222,8 +242,10 @@ pub struct StandardCommandPoolAlloc {
|
||||
queue_family_id: u32,
|
||||
}
|
||||
|
||||
unsafe impl Send for StandardCommandPoolAlloc {}
|
||||
unsafe impl Sync for StandardCommandPoolAlloc {}
|
||||
unsafe impl Send for StandardCommandPoolAlloc {
|
||||
}
|
||||
unsafe impl Sync for StandardCommandPoolAlloc {
|
||||
}
|
||||
|
||||
unsafe impl CommandPoolAlloc for StandardCommandPoolAlloc {
|
||||
#[inline]
|
||||
@ -233,7 +255,10 @@ unsafe impl CommandPoolAlloc for StandardCommandPoolAlloc {
|
||||
|
||||
#[inline]
|
||||
fn queue_family(&self) -> QueueFamily {
|
||||
self.device.physical_device().queue_family_by_id(self.queue_family_id).unwrap()
|
||||
self.device
|
||||
.physical_device()
|
||||
.queue_family_by_id(self.queue_family_id)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
@ -249,9 +274,11 @@ impl Drop for StandardCommandPoolAlloc {
|
||||
let mut pool = self.pool.lock().unwrap();
|
||||
|
||||
if self.secondary {
|
||||
pool.available_secondary_command_buffers.push(self.cmd.take().unwrap());
|
||||
pool.available_secondary_command_buffers
|
||||
.push(self.cmd.take().unwrap());
|
||||
} else {
|
||||
pool.available_primary_command_buffers.push(self.cmd.take().unwrap());
|
||||
pool.available_primary_command_buffers
|
||||
.push(self.cmd.take().unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -7,23 +7,23 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
use std::vec::IntoIter as VecIntoIter;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use instance::QueueFamily;
|
||||
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use Error;
|
||||
use check_errors;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use vk;
|
||||
|
||||
/// Low-level implementation of a command pool.
|
||||
@ -46,7 +46,8 @@ pub struct UnsafeCommandPool {
|
||||
dummy_avoid_sync: PhantomData<*const u8>,
|
||||
}
|
||||
|
||||
unsafe impl Send for UnsafeCommandPool {}
|
||||
unsafe impl Send for UnsafeCommandPool {
|
||||
}
|
||||
|
||||
impl UnsafeCommandPool {
|
||||
/// Creates a new pool.
|
||||
@ -62,9 +63,8 @@ impl UnsafeCommandPool {
|
||||
///
|
||||
/// - Panics if the queue family doesn't belong to the same physical device as `device`.
|
||||
///
|
||||
pub fn new(device: Arc<Device>, queue_family: QueueFamily, transient: bool,
|
||||
reset_cb: bool) -> Result<UnsafeCommandPool, OomError>
|
||||
{
|
||||
pub fn new(device: Arc<Device>, queue_family: QueueFamily, transient: bool, reset_cb: bool)
|
||||
-> Result<UnsafeCommandPool, OomError> {
|
||||
assert_eq!(device.physical_device().internal_object(),
|
||||
queue_family.physical_device().internal_object(),
|
||||
"Device doesn't match physical device when creating a command pool");
|
||||
@ -72,9 +72,16 @@ impl UnsafeCommandPool {
|
||||
let vk = device.pointers();
|
||||
|
||||
let flags = {
|
||||
let flag1 = if transient { vk::COMMAND_POOL_CREATE_TRANSIENT_BIT } else { 0 };
|
||||
let flag2 = if reset_cb { vk::COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT }
|
||||
else { 0 };
|
||||
let flag1 = if transient {
|
||||
vk::COMMAND_POOL_CREATE_TRANSIENT_BIT
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let flag2 = if reset_cb {
|
||||
vk::COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
|
||||
} else {
|
||||
0
|
||||
};
|
||||
flag1 | flag2
|
||||
};
|
||||
|
||||
@ -87,8 +94,10 @@ impl UnsafeCommandPool {
        };

        let mut output = mem::uninitialized();
        try!(check_errors(vk.CreateCommandPool(device.internal_object(), &infos,
                                               ptr::null(), &mut output)));
        check_errors(vk.CreateCommandPool(device.internal_object(),
                                          &infos,
                                          ptr::null(),
                                          &mut output))?;
        output
    };

@ -110,11 +119,14 @@ impl UnsafeCommandPool {
    /// The command buffers allocated from this pool jump to the initial state.
    ///
    pub unsafe fn reset(&self, release_resources: bool) -> Result<(), OomError> {
        let flags = if release_resources { vk::COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT }
                    else { 0 };
        let flags = if release_resources {
            vk::COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT
        } else {
            0
        };

        let vk = self.device.pointers();
        try!(check_errors(vk.ResetCommandPool(self.device.internal_object(), self.pool, flags)));
        check_errors(vk.ResetCommandPool(self.device.internal_object(), self.pool, flags))?;
        Ok(())
    }
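
Several call sites above were rewritten from `try!(...)` to the `?` operator. Both propagate the error and run it through `From` for conversion; the following self-contained sketch shows the equivalence with stand-in error types (`VkError` and `OomError` here are placeholders, not vulkano's):

// Stand-in for a raw Vulkan result code turned into a typed error.
#[derive(Debug)]
struct VkError(i32);

#[derive(Debug)]
struct OomError;

impl From<VkError> for OomError {
    fn from(_: VkError) -> OomError {
        OomError
    }
}

// Analogous to `check_errors`: turn a raw result code into a `Result`.
fn check_errors(code: i32) -> Result<(), VkError> {
    if code >= 0 { Ok(()) } else { Err(VkError(code)) }
}

// `?` propagates the error and applies the `From` conversion,
// which is exactly what the older `try!(...)` calls expanded to.
fn submit(code: i32) -> Result<(), OomError> {
    check_errors(code)?;
    Ok(())
}

fn main() {
    assert!(submit(0).is_ok());
    assert!(submit(-1).is_err());
}
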
@ -134,7 +146,9 @@ impl UnsafeCommandPool {
        }

        let vk = self.device.pointers();
        vk.TrimCommandPoolKHR(self.device.internal_object(), self.pool, 0 /* reserved */);
        vk.TrimCommandPoolKHR(self.device.internal_object(),
                              self.pool,
                              0 /* reserved */);
        Ok(())
    }
}
@ -144,34 +158,33 @@ impl UnsafeCommandPool {
    /// If `secondary` is true, allocates secondary command buffers. Otherwise, allocates primary
    /// command buffers.
    pub fn alloc_command_buffers(&self, secondary: bool, count: usize)
                                 -> Result<UnsafeCommandPoolAllocIter, OomError>
    {
                                 -> Result<UnsafeCommandPoolAllocIter, OomError> {
        if count == 0 {
            return Ok(UnsafeCommandPoolAllocIter {
                list: None
            });
            return Ok(UnsafeCommandPoolAllocIter { list: None });
        }

        let infos = vk::CommandBufferAllocateInfo {
            sType: vk::STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
            pNext: ptr::null(),
            commandPool: self.pool,
            level: if secondary { vk::COMMAND_BUFFER_LEVEL_SECONDARY }
                   else { vk::COMMAND_BUFFER_LEVEL_PRIMARY },
            level: if secondary {
                vk::COMMAND_BUFFER_LEVEL_SECONDARY
            } else {
                vk::COMMAND_BUFFER_LEVEL_PRIMARY
            },
            commandBufferCount: count as u32,
        };

        unsafe {
            let vk = self.device.pointers();
            let mut out = Vec::with_capacity(count);
            try!(check_errors(vk.AllocateCommandBuffers(self.device.internal_object(), &infos,
                                                        out.as_mut_ptr())));
            check_errors(vk.AllocateCommandBuffers(self.device.internal_object(),
                                                   &infos,
                                                   out.as_mut_ptr()))?;

            out.set_len(count);

            Ok(UnsafeCommandPoolAllocIter {
                list: Some(out.into_iter())
            })
            Ok(UnsafeCommandPoolAllocIter { list: Some(out.into_iter()) })
        }
    }
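
`alloc_command_buffers` above fills a `Vec` through a raw pointer and only then calls `set_len`, which is sound solely because the driver writes exactly `count` handles. A hedged, self-contained sketch of that pattern, with a fake FFI-style writer in place of `vkAllocateCommandBuffers`:

// Simulates an FFI call that writes `count` handles through a raw pointer,
// mirroring how the allocate call above fills the output array.
unsafe fn fake_allocate(out: *mut u64, count: usize) {
    for i in 0..count {
        out.add(i).write(i as u64 + 1);
    }
}

fn alloc_handles(count: usize) -> Vec<u64> {
    let mut out = Vec::with_capacity(count);
    unsafe {
        // The callee writes exactly `count` elements, so `set_len` is sound here.
        fake_allocate(out.as_mut_ptr(), count);
        out.set_len(count);
    }
    out
}

fn main() {
    assert_eq!(alloc_handles(3), vec![1, 2, 3]);
}
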
@ -186,14 +199,19 @@ impl UnsafeCommandPool {
    {
        let command_buffers: SmallVec<[_; 4]> = command_buffers.map(|cb| cb.0).collect();
        let vk = self.device.pointers();
        vk.FreeCommandBuffers(self.device.internal_object(), self.pool,
                              command_buffers.len() as u32, command_buffers.as_ptr())
        vk.FreeCommandBuffers(self.device.internal_object(),
                              self.pool,
                              command_buffers.len() as u32,
                              command_buffers.as_ptr())
    }

    /// Returns the queue family on which command buffers of this pool can be executed.
    #[inline]
    pub fn queue_family(&self) -> QueueFamily {
        self.device.physical_device().queue_family_by_id(self.queue_family_index).unwrap()
        self.device
            .physical_device()
            .queue_family_by_id(self.queue_family_index)
            .unwrap()
    }
}
@ -238,7 +256,7 @@ unsafe impl VulkanObject for UnsafeCommandPoolAlloc {
/// Iterator for newly-allocated command buffers.
#[derive(Debug)]
pub struct UnsafeCommandPoolAllocIter {
    list: Option<VecIntoIter<vk::CommandBuffer>>
    list: Option<VecIntoIter<vk::CommandBuffer>>,
}

impl Iterator for UnsafeCommandPoolAllocIter {
@ -246,16 +264,23 @@ impl Iterator for UnsafeCommandPoolAllocIter {

    #[inline]
    fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
        self.list.as_mut().and_then(|i| i.next()).map(|cb| UnsafeCommandPoolAlloc(cb))
        self.list
            .as_mut()
            .and_then(|i| i.next())
            .map(|cb| UnsafeCommandPoolAlloc(cb))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.list.as_ref().map(|i| i.size_hint()).unwrap_or((0, Some(0)))
        self.list
            .as_ref()
            .map(|i| i.size_hint())
            .unwrap_or((0, Some(0)))
    }
}

impl ExactSizeIterator for UnsafeCommandPoolAllocIter {}
impl ExactSizeIterator for UnsafeCommandPoolAllocIter {
}
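
`UnsafeCommandPoolAllocIter` above wraps an `Option` around an inner vector iterator so that a zero-sized allocation request needs no `Vec` at all. The same shape in miniature, with `u32` standing in for the raw handle type:

use std::vec::IntoIter as VecIntoIter;

// Same layout as the iterator above: an optional inner iterator,
// where `None` stands for "zero items were requested".
struct MaybeIter {
    list: Option<VecIntoIter<u32>>,
}

impl Iterator for MaybeIter {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        self.list.as_mut().and_then(|i| i.next())
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // An absent inner iterator reports an exact length of zero.
        self.list.as_ref().map(|i| i.size_hint()).unwrap_or((0, Some(0)))
    }
}

impl ExactSizeIterator for MaybeIter {}

fn main() {
    let it = MaybeIter { list: Some(vec![7, 8, 9].into_iter()) };
    assert_eq!(it.len(), 3);

    let empty = MaybeIter { list: None };
    assert_eq!(empty.count(), 0);
}
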
/// Error that can happen when trimming command pools.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@ -268,8 +293,8 @@ impl error::Error for CommandPoolTrimError {
    #[inline]
    fn description(&self) -> &str {
        match *self {
            CommandPoolTrimError::Maintenance1ExtensionNotEnabled => "the `KHR_maintenance1` \
                                                                      extension was not enabled",
            CommandPoolTrimError::Maintenance1ExtensionNotEnabled =>
                "the `KHR_maintenance1` extension was not enabled",
        }
    }
}
@ -290,8 +315,8 @@ impl From<Error> for CommandPoolTrimError {

#[cfg(test)]
mod tests {
    use command_buffer::pool::UnsafeCommandPool;
    use command_buffer::pool::CommandPoolTrimError;
    use command_buffer::pool::UnsafeCommandPool;

    #[test]
    fn basic_create() {
@ -321,7 +346,7 @@ mod tests {

        match pool.trim() {
            Err(CommandPoolTrimError::Maintenance1ExtensionNotEnabled) => (),
            _ => panic!()
            _ => panic!(),
        }
    }
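
The test above pins down one specific error variant and panics on anything else. A sketch of the same pattern against stand-in types; `FakePool` and `TrimError` are illustrative, not the real `UnsafeCommandPool`:

#[derive(Debug)]
enum TrimError {
    Maintenance1ExtensionNotEnabled,
}

struct FakePool {
    maintenance1_enabled: bool,
}

impl FakePool {
    // Mirrors the shape of a trim call gated on an extension being enabled.
    fn trim(&self) -> Result<(), TrimError> {
        if self.maintenance1_enabled {
            Ok(())
        } else {
            Err(TrimError::Maintenance1ExtensionNotEnabled)
        }
    }
}

fn main() {
    let pool = FakePool { maintenance1_enabled: false };
    // Accept exactly the expected variant; anything else is a test failure.
    match pool.trim() {
        Err(TrimError::Maintenance1ExtensionNotEnabled) => (),
        _ => panic!(),
    }
}
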
@ -7,10 +7,10 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use VulkanObject;
use command_buffer::DynamicState;
use pipeline::ComputePipelineAbstract;
use pipeline::GraphicsPipelineAbstract;
use VulkanObject;
use vk;

/// Keep track of the state of a command buffer builder, so that you don't need to bind objects
@ -7,11 +7,11 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::ptr;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use buffer::sys::UnsafeBuffer;
|
||||
use device::Queue;
|
||||
@ -20,12 +20,12 @@ use memory::DeviceMemory;
|
||||
use sync::Fence;
|
||||
use sync::Semaphore;
|
||||
|
||||
use check_errors;
|
||||
use vk;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use SynchronizedVulkanObject;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
// TODO: correctly implement Debug on all the structs of this module
|
||||
|
||||
@ -127,8 +127,7 @@ impl<'a> SubmitBindSparseBuilder<'a> {
|
||||
/// error.
|
||||
#[inline]
|
||||
pub fn merge(&mut self, other: SubmitBindSparseBuilder<'a>)
|
||||
-> Result<(), SubmitBindSparseBuilder<'a>>
|
||||
{
|
||||
-> Result<(), SubmitBindSparseBuilder<'a>> {
|
||||
if self.fence != 0 && other.fence != 0 {
|
||||
return Err(other);
|
||||
}
|
||||
@ -147,7 +146,8 @@ impl<'a> SubmitBindSparseBuilder<'a> {
|
||||
|
||||
// We start by storing all the `VkSparseBufferMemoryBindInfo`s of the whole command
|
||||
// in the same collection.
|
||||
let buffer_binds_storage: SmallVec<[_; 4]> = self.infos.iter()
|
||||
let buffer_binds_storage: SmallVec<[_; 4]> = self.infos
|
||||
.iter()
|
||||
.flat_map(|infos| infos.buffer_binds.iter())
|
||||
.map(|buf_bind| {
|
||||
vk::SparseBufferMemoryBindInfo {
|
||||
@ -159,7 +159,8 @@ impl<'a> SubmitBindSparseBuilder<'a> {
|
||||
.collect();
|
||||
|
||||
// Same for all the `VkSparseImageOpaqueMemoryBindInfo`s.
|
||||
let image_opaque_binds_storage: SmallVec<[_; 4]> = self.infos.iter()
|
||||
let image_opaque_binds_storage: SmallVec<[_; 4]> = self.infos
|
||||
.iter()
|
||||
.flat_map(|infos| infos.image_opaque_binds.iter())
|
||||
.map(|img_bind| {
|
||||
vk::SparseImageOpaqueMemoryBindInfo {
|
||||
@ -171,7 +172,8 @@ impl<'a> SubmitBindSparseBuilder<'a> {
|
||||
.collect();
|
||||
|
||||
// And finally the `VkSparseImageMemoryBindInfo`s.
|
||||
let image_binds_storage: SmallVec<[_; 4]> = self.infos.iter()
|
||||
let image_binds_storage: SmallVec<[_; 4]> = self.infos
|
||||
.iter()
|
||||
.flat_map(|infos| infos.image_binds.iter())
|
||||
.map(|img_bind| {
|
||||
vk::SparseImageMemoryBindInfo {
|
||||
@ -231,14 +233,17 @@ impl<'a> SubmitBindSparseBuilder<'a> {
|
||||
|
||||
// If these assertions fail, then there's something wrong in the code above.
|
||||
debug_assert_eq!(next_buffer_bind as usize, buffer_binds_storage.len());
|
||||
debug_assert_eq!(next_image_opaque_bind as usize, image_opaque_binds_storage.len());
|
||||
debug_assert_eq!(next_image_opaque_bind as usize,
|
||||
image_opaque_binds_storage.len());
|
||||
debug_assert_eq!(next_image_bind as usize, image_binds_storage.len());
|
||||
|
||||
bs_infos
|
||||
};
|
||||
|
||||
// Finally executing the command.
|
||||
check_errors(vk.QueueBindSparse(*queue, bs_infos.len() as u32, bs_infos.as_ptr(),
|
||||
check_errors(vk.QueueBindSparse(*queue,
|
||||
bs_infos.len() as u32,
|
||||
bs_infos.as_ptr(),
|
||||
self.fence))?;
|
||||
Ok(())
|
||||
}
|
||||
@ -359,8 +364,7 @@ impl<'a> SubmitBindSparseBufferBindBuilder<'a> {
|
||||
}
|
||||
|
||||
pub unsafe fn add_bind(&mut self, offset: usize, size: usize, memory: &DeviceMemory,
|
||||
memory_offset: usize)
|
||||
{
|
||||
memory_offset: usize) {
|
||||
self.binds.push(vk::SparseMemoryBind {
|
||||
resourceOffset: offset as vk::DeviceSize,
|
||||
size: size as vk::DeviceSize,
|
||||
@ -401,8 +405,7 @@ impl<'a> SubmitBindSparseImageOpaqueBindBuilder<'a> {
|
||||
}
|
||||
|
||||
pub unsafe fn add_bind(&mut self, offset: usize, size: usize, memory: &DeviceMemory,
|
||||
memory_offset: usize, bind_metadata: bool)
|
||||
{
|
||||
memory_offset: usize, bind_metadata: bool) {
|
||||
self.binds.push(vk::SparseMemoryBind {
|
||||
resourceOffset: offset as vk::DeviceSize,
|
||||
size: size as vk::DeviceSize,
|
||||
@ -473,7 +476,7 @@ impl error::Error for SubmitBindSparseError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
SubmitBindSparseError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -492,7 +495,7 @@ impl From<Error> for SubmitBindSparseError {
|
||||
err @ Error::OutOfHostMemory => SubmitBindSparseError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => SubmitBindSparseError::OomError(OomError::from(err)),
|
||||
Error::DeviceLost => SubmitBindSparseError::DeviceLost,
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -13,12 +13,12 @@
//! module. These structs are low-level and unsafe, and are mostly used to implement other parts
//! of vulkano, so you are encouraged to not use them directly.

pub use self::bind_sparse::SubmitBindSparseBuilder;
pub use self::bind_sparse::SubmitBindSparseBatchBuilder;
pub use self::bind_sparse::SubmitBindSparseBufferBindBuilder;
pub use self::bind_sparse::SubmitBindSparseImageOpaqueBindBuilder;
pub use self::bind_sparse::SubmitBindSparseImageBindBuilder;
pub use self::bind_sparse::SubmitBindSparseBuilder;
pub use self::bind_sparse::SubmitBindSparseError;
pub use self::bind_sparse::SubmitBindSparseImageBindBuilder;
pub use self::bind_sparse::SubmitBindSparseImageOpaqueBindBuilder;
pub use self::queue_present::SubmitPresentBuilder;
pub use self::queue_present::SubmitPresentError;
pub use self::queue_submit::SubmitCommandBufferBuilder;
@ -7,23 +7,23 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use device::Queue;
|
||||
use swapchain::Swapchain;
|
||||
use sync::Semaphore;
|
||||
|
||||
use check_errors;
|
||||
use vk;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use SynchronizedVulkanObject;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
/// Prototype for a submission that presents a swapchain on the screen.
|
||||
// TODO: example here
|
||||
@ -113,7 +113,7 @@ impl<'a> SubmitPresentBuilder<'a> {
|
||||
pResults: results.as_mut_ptr(),
|
||||
};
|
||||
|
||||
try!(check_errors(vk.QueuePresentKHR(*queue, &infos)));
|
||||
check_errors(vk.QueuePresentKHR(*queue, &infos))?;
|
||||
|
||||
for result in results {
|
||||
// TODO: AMD driver initially didn't write the results ; check that it's been fixed
|
||||
@ -158,7 +158,7 @@ impl error::Error for SubmitPresentError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
SubmitPresentError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -179,7 +179,7 @@ impl From<Error> for SubmitPresentError {
|
||||
Error::DeviceLost => SubmitPresentError::DeviceLost,
|
||||
Error::SurfaceLost => SubmitPresentError::SurfaceLost,
|
||||
Error::OutOfDate => SubmitPresentError::OutOfDate,
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -7,11 +7,11 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::ptr;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use command_buffer::sys::UnsafeCommandBuffer;
|
||||
use device::Queue;
|
||||
@ -19,12 +19,12 @@ use sync::Fence;
|
||||
use sync::PipelineStages;
|
||||
use sync::Semaphore;
|
||||
|
||||
use check_errors;
|
||||
use vk;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use SynchronizedVulkanObject;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
/// Prototype for a submission that executes command buffers.
|
||||
// TODO: example here
|
||||
@ -219,7 +219,7 @@ impl<'a> SubmitCommandBufferBuilder<'a> {
|
||||
pSignalSemaphores: self.signal_semaphores.as_ptr(),
|
||||
};
|
||||
|
||||
try!(check_errors(vk.QueueSubmit(*queue, 1, &batch, self.fence)));
|
||||
check_errors(vk.QueueSubmit(*queue, 1, &batch, self.fence))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -271,7 +271,7 @@ impl error::Error for SubmitCommandBufferError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
SubmitCommandBufferError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -288,17 +288,18 @@ impl From<Error> for SubmitCommandBufferError {
|
||||
fn from(err: Error) -> SubmitCommandBufferError {
|
||||
match err {
|
||||
err @ Error::OutOfHostMemory => SubmitCommandBufferError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => SubmitCommandBufferError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory =>
|
||||
SubmitCommandBufferError::OomError(OomError::from(err)),
|
||||
Error::DeviceLost => SubmitCommandBufferError::DeviceLost,
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
use super::*;
|
||||
use std::time::Duration;
|
||||
use sync::Fence;
|
||||
|
||||
#[test]
|
||||
|
@ -27,9 +27,7 @@ impl<'a> SubmitSemaphoresWaitBuilder<'a> {
|
||||
/// Builds a new empty `SubmitSemaphoresWaitBuilder`.
|
||||
#[inline]
|
||||
pub fn new() -> SubmitSemaphoresWaitBuilder<'a> {
|
||||
SubmitSemaphoresWaitBuilder {
|
||||
semaphores: SmallVec::new(),
|
||||
}
|
||||
SubmitSemaphoresWaitBuilder { semaphores: SmallVec::new() }
|
||||
}
|
||||
|
||||
/// Adds an operation that waits on a semaphore.
|
||||
@ -53,7 +51,8 @@ impl<'a> Into<SubmitCommandBufferBuilder<'a>> for SubmitSemaphoresWaitBuilder<'a
|
||||
unsafe {
|
||||
let mut builder = SubmitCommandBufferBuilder::new();
|
||||
for sem in self.semaphores.drain() {
|
||||
builder.add_wait_semaphore(sem, PipelineStages {
|
||||
builder.add_wait_semaphore(sem,
|
||||
PipelineStages {
|
||||
// TODO: correct stages ; hard
|
||||
all_commands: true,
|
||||
..PipelineStages::none()
|
||||
|
File diff suppressed because it is too large
@ -7,23 +7,26 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use buffer::BufferInner;
|
||||
use check_errors;
|
||||
use command_buffer::CommandBuffer;
|
||||
use command_buffer::pool::CommandPool;
|
||||
use command_buffer::pool::CommandPoolBuilderAlloc;
|
||||
use command_buffer::pool::CommandPoolAlloc;
|
||||
use command_buffer::pool::CommandPoolBuilderAlloc;
|
||||
use descriptor::descriptor::ShaderStages;
|
||||
use descriptor::pipeline_layout::PipelineLayoutAbstract;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSet;
|
||||
use descriptor::pipeline_layout::PipelineLayoutAbstract;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use format::ClearValue;
|
||||
@ -33,8 +36,8 @@ use framebuffer::FramebufferAbstract;
|
||||
use framebuffer::RenderPass;
|
||||
use framebuffer::RenderPassAbstract;
|
||||
use framebuffer::Subpass;
|
||||
use image::ImageLayout;
|
||||
use image::ImageAccess;
|
||||
use image::ImageLayout;
|
||||
use instance::QueueFamily;
|
||||
use pipeline::ComputePipelineAbstract;
|
||||
use pipeline::GraphicsPipelineAbstract;
|
||||
@ -42,11 +45,8 @@ use pipeline::input_assembly::IndexType;
|
||||
use pipeline::viewport::Scissor;
|
||||
use pipeline::viewport::Viewport;
|
||||
use sync::AccessFlagBits;
|
||||
use sync::PipelineStages;
|
||||
use sync::Event;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use sync::PipelineStages;
|
||||
use vk;
|
||||
|
||||
/// Determines the kind of command buffer that we want to create.
|
||||
@ -71,14 +71,19 @@ pub enum Kind<R, F> {
|
||||
},
|
||||
}
|
||||
|
||||
impl Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
|
||||
impl
|
||||
Kind<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
|
||||
/// Equivalent to `Kind::Primary`.
|
||||
///
|
||||
/// > **Note**: If you use `let kind = Kind::Primary;` in your code, you will probably get a
|
||||
/// > compilation error because the Rust compiler couldn't determine the template parameters
|
||||
/// > of `Kind`. To solve that problem in an easy way you can use this function instead.
|
||||
#[inline]
|
||||
pub fn primary() -> Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
|
||||
pub fn primary()
|
||||
-> Kind<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
{
|
||||
Kind::Primary
|
||||
}
|
||||
|
||||
@ -88,7 +93,10 @@ impl Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<Empt
|
||||
/// > compilation error because the Rust compiler couldn't determine the template parameters
|
||||
/// > of `Kind`. To solve that problem in an easy way you can use this function instead.
|
||||
#[inline]
|
||||
pub fn secondary() -> Kind<RenderPass<EmptySinglePassRenderPassDesc>, Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>> {
|
||||
pub fn secondary()
|
||||
-> Kind<RenderPass<EmptySinglePassRenderPassDesc>,
|
||||
Framebuffer<RenderPass<EmptySinglePassRenderPassDesc>, ()>>
|
||||
{
|
||||
Kind::Secondary
|
||||
}
|
||||
}
|
||||
@ -164,11 +172,13 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
{
|
||||
let secondary = match kind {
|
||||
Kind::Primary => false,
|
||||
Kind::Secondary | Kind::SecondaryRenderPass { .. } => true,
|
||||
Kind::Secondary |
|
||||
Kind::SecondaryRenderPass { .. } => true,
|
||||
};
|
||||
|
||||
let cmd = try!(pool.alloc(secondary, 1)).next().expect("Requested one command buffer from \
|
||||
the command pool, but got zero.");
|
||||
let cmd = pool.alloc(secondary, 1)?
|
||||
.next()
|
||||
.expect("Requested one command buffer from the command pool, but got zero.");
|
||||
UnsafeCommandBufferBuilder::already_allocated(cmd, kind, flags)
|
||||
}
|
||||
|
||||
@ -213,7 +223,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
(0, 0)
|
||||
};
|
||||
|
||||
let framebuffer = if let Kind::SecondaryRenderPass { ref subpass, framebuffer: Some(ref framebuffer) } = kind {
|
||||
let framebuffer = if let Kind::SecondaryRenderPass {
|
||||
ref subpass,
|
||||
framebuffer: Some(ref framebuffer),
|
||||
} = kind
|
||||
{
|
||||
// TODO: restore check
|
||||
//assert!(framebuffer.is_compatible_with(subpass.render_pass())); // TODO: proper error
|
||||
FramebufferAbstract::inner(&framebuffer).internal_object()
|
||||
@ -239,7 +253,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
pInheritanceInfo: &inheritance,
|
||||
};
|
||||
|
||||
try!(check_errors(vk.BeginCommandBuffer(cmd, &infos)));
|
||||
check_errors(vk.BeginCommandBuffer(cmd, &infos))?;
|
||||
|
||||
Ok(UnsafeCommandBufferBuilder {
|
||||
cmd: Some(alloc),
|
||||
@ -265,7 +279,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
unsafe {
|
||||
let cmd = self.cmd.take().unwrap();
|
||||
let vk = self.device.pointers();
|
||||
try!(check_errors(vk.EndCommandBuffer(cmd.inner().internal_object())));
|
||||
check_errors(vk.EndCommandBuffer(cmd.inner().internal_object()))?;
|
||||
let cmd_raw = cmd.inner().internal_object();
|
||||
|
||||
Ok(UnsafeCommandBuffer {
|
||||
@ -293,8 +307,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let raw_render_pass = RenderPassAbstract::inner(&framebuffer).internal_object();
|
||||
let raw_framebuffer = FramebufferAbstract::inner(&framebuffer).internal_object();
|
||||
|
||||
let raw_clear_values: SmallVec<[_; 12]> = clear_values.map(|clear_value| {
|
||||
match clear_value {
|
||||
let raw_clear_values: SmallVec<[_; 12]> = clear_values
|
||||
.map(|clear_value| match clear_value {
|
||||
ClearValue::None => {
|
||||
vk::ClearValue::color(vk::ClearColorValue::float32([0.0; 4]))
|
||||
},
|
||||
@ -309,25 +323,30 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
},
|
||||
ClearValue::Depth(val) => {
|
||||
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
|
||||
depth: val, stencil: 0
|
||||
depth: val,
|
||||
stencil: 0,
|
||||
})
|
||||
},
|
||||
ClearValue::Stencil(val) => {
|
||||
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
|
||||
depth: 0.0, stencil: val
|
||||
depth: 0.0,
|
||||
stencil: val,
|
||||
})
|
||||
},
|
||||
ClearValue::DepthStencil((depth, stencil)) => {
|
||||
vk::ClearValue::depth_stencil(vk::ClearDepthStencilValue {
|
||||
depth: depth, stencil: stencil,
|
||||
depth: depth,
|
||||
stencil: stencil,
|
||||
})
|
||||
},
|
||||
}
|
||||
}).collect();
|
||||
})
|
||||
.collect();
|
||||
|
||||
// TODO: allow customizing
|
||||
let rect = [0 .. framebuffer.dimensions()[0],
|
||||
0 .. framebuffer.dimensions()[1]];
|
||||
let rect = [
|
||||
0 .. framebuffer.dimensions()[0],
|
||||
0 .. framebuffer.dimensions()[1],
|
||||
];
|
||||
|
||||
let begin = vk::RenderPassBeginInfo {
|
||||
sType: vk::STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
|
||||
@ -348,8 +367,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
pClearValues: raw_clear_values.as_ptr(),
|
||||
};
|
||||
|
||||
let contents = if secondary { vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS }
|
||||
else { vk::SUBPASS_CONTENTS_INLINE };
|
||||
let contents = if secondary {
|
||||
vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
|
||||
} else {
|
||||
vk::SUBPASS_CONTENTS_INLINE
|
||||
};
|
||||
|
||||
vk.CmdBeginRenderPass(cmd, &begin, contents);
|
||||
}
|
||||
@ -364,7 +386,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
sets: S, dynamic_offsets: I)
|
||||
where Pl: ?Sized + PipelineLayoutAbstract,
|
||||
S: Iterator<Item = &'s UnsafeDescriptorSet>,
|
||||
I: Iterator<Item = u32>,
|
||||
I: Iterator<Item = u32>
|
||||
{
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
@ -378,12 +400,20 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let num_bindings = sets.len() as u32;
|
||||
debug_assert!(first_binding + num_bindings <= pipeline_layout.num_sets() as u32);
|
||||
|
||||
let bind_point = if graphics { vk::PIPELINE_BIND_POINT_GRAPHICS }
|
||||
else { vk::PIPELINE_BIND_POINT_COMPUTE };
|
||||
let bind_point = if graphics {
|
||||
vk::PIPELINE_BIND_POINT_GRAPHICS
|
||||
} else {
|
||||
vk::PIPELINE_BIND_POINT_COMPUTE
|
||||
};
|
||||
|
||||
vk.CmdBindDescriptorSets(cmd, bind_point, pipeline_layout.sys().internal_object(),
|
||||
first_binding, num_bindings, sets.as_ptr(),
|
||||
dynamic_offsets.len() as u32, dynamic_offsets.as_ptr());
|
||||
vk.CmdBindDescriptorSets(cmd,
|
||||
bind_point,
|
||||
pipeline_layout.sys().internal_object(),
|
||||
first_binding,
|
||||
num_bindings,
|
||||
sets.as_ptr(),
|
||||
dynamic_offsets.len() as u32,
|
||||
dynamic_offsets.as_ptr());
|
||||
}
|
||||
|
||||
/// Calls `vkCmdBindIndexBuffer` on the builder.
|
||||
@ -398,7 +428,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert!(inner.offset < inner.buffer.size());
|
||||
debug_assert!(inner.buffer.usage_index_buffer());
|
||||
|
||||
vk.CmdBindIndexBuffer(cmd, inner.buffer.internal_object(), inner.offset as vk::DeviceSize,
|
||||
vk.CmdBindIndexBuffer(cmd,
|
||||
inner.buffer.internal_object(),
|
||||
inner.offset as vk::DeviceSize,
|
||||
index_ty as vk::IndexType);
|
||||
}
|
||||
|
||||
@ -409,7 +441,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
{
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdBindPipeline(cmd, vk::PIPELINE_BIND_POINT_COMPUTE,
|
||||
vk.CmdBindPipeline(cmd,
|
||||
vk::PIPELINE_BIND_POINT_COMPUTE,
|
||||
pipeline.inner().internal_object());
|
||||
}
|
||||
|
||||
@ -430,8 +463,7 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
/// usage of the command anyway.
|
||||
#[inline]
|
||||
pub unsafe fn bind_vertex_buffers(&mut self, first_binding: u32,
|
||||
params: UnsafeCommandBufferBuilderBindVertexBuffer)
|
||||
{
|
||||
params: UnsafeCommandBufferBuilderBindVertexBuffer) {
|
||||
debug_assert_eq!(params.raw_buffers.len(), params.offsets.len());
|
||||
|
||||
if params.raw_buffers.is_empty() {
|
||||
@ -444,11 +476,17 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let num_bindings = params.raw_buffers.len() as u32;
|
||||
|
||||
debug_assert!({
|
||||
let max_bindings = self.device().physical_device().limits().max_vertex_input_bindings();
|
||||
let max_bindings = self.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_vertex_input_bindings();
|
||||
first_binding + num_bindings <= max_bindings
|
||||
});
|
||||
|
||||
vk.CmdBindVertexBuffers(cmd, first_binding, num_bindings, params.raw_buffers.as_ptr(),
|
||||
vk.CmdBindVertexBuffers(cmd,
|
||||
first_binding,
|
||||
num_bindings,
|
||||
params.raw_buffers.as_ptr(),
|
||||
params.offsets.as_ptr());
|
||||
}
|
||||
|
||||
@ -495,13 +533,15 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert!(destination.offset < destination.buffer.size());
|
||||
debug_assert!(destination.buffer.usage_transfer_dest());
|
||||
|
||||
let regions: SmallVec<[_; 8]> = regions.map(|(sr, de, sz)| {
|
||||
let regions: SmallVec<[_; 8]> = regions
|
||||
.map(|(sr, de, sz)| {
|
||||
vk::BufferCopy {
|
||||
srcOffset: (sr + source.offset) as vk::DeviceSize,
|
||||
dstOffset: (de + destination.offset) as vk::DeviceSize,
|
||||
size: sz as vk::DeviceSize,
|
||||
}
|
||||
}).collect();
|
||||
})
|
||||
.collect();
|
||||
|
||||
if regions.is_empty() {
|
||||
return;
|
||||
@ -509,8 +549,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdCopyBuffer(cmd, source.buffer.internal_object(), destination.buffer.internal_object(),
|
||||
regions.len() as u32, regions.as_ptr());
|
||||
vk.CmdCopyBuffer(cmd,
|
||||
source.buffer.internal_object(),
|
||||
destination.buffer.internal_object(),
|
||||
regions.len() as u32,
|
||||
regions.as_ptr());
|
||||
}
|
||||
|
||||
/// Calls `vkCmdCopyBufferToImage` on the builder.
|
||||
@ -528,7 +571,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert!(source.offset < source.buffer.size());
|
||||
debug_assert!(source.buffer.usage_transfer_src());
|
||||
|
||||
let regions: SmallVec<[_; 8]> = regions.map(|copy| {
|
||||
let regions: SmallVec<[_; 8]> = regions
|
||||
.map(|copy| {
|
||||
vk::BufferImageCopy {
|
||||
bufferOffset: (source.offset + copy.buffer_offset) as vk::DeviceSize,
|
||||
bufferRowLength: copy.buffer_row_length,
|
||||
@ -550,7 +594,8 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
depth: copy.image_extent[2],
|
||||
},
|
||||
}
|
||||
}).collect();
|
||||
})
|
||||
.collect();
|
||||
|
||||
if regions.is_empty() {
|
||||
return;
|
||||
@ -563,16 +608,22 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdCopyBufferToImage(cmd, source.buffer.internal_object(),
|
||||
destination.inner().internal_object(), dest_layout as u32,
|
||||
regions.len() as u32, regions.as_ptr());
|
||||
vk.CmdCopyBufferToImage(cmd,
|
||||
source.buffer.internal_object(),
|
||||
destination.inner().internal_object(),
|
||||
dest_layout as u32,
|
||||
regions.len() as u32,
|
||||
regions.as_ptr());
|
||||
}
|
||||
|
||||
/// Calls `vkCmdDispatch` on the builder.
|
||||
#[inline]
|
||||
pub unsafe fn dispatch(&mut self, dimensions: [u32; 3]) {
|
||||
debug_assert!({
|
||||
let max_dims = self.device().physical_device().limits().max_compute_work_group_count();
|
||||
let max_dims = self.device()
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_compute_work_group_count();
|
||||
dimensions[0] <= max_dims[0] && dimensions[1] <= max_dims[1] &&
|
||||
dimensions[2] <= max_dims[2]
|
||||
});
|
||||
@ -595,27 +646,35 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert!(inner.buffer.usage_indirect_buffer());
|
||||
debug_assert_eq!(inner.offset % 4, 0);
|
||||
|
||||
vk.CmdDispatchIndirect(cmd, inner.buffer.internal_object(), inner.offset as vk::DeviceSize);
|
||||
vk.CmdDispatchIndirect(cmd,
|
||||
inner.buffer.internal_object(),
|
||||
inner.offset as vk::DeviceSize);
|
||||
}
|
||||
|
||||
/// Calls `vkCmdDraw` on the builder.
|
||||
#[inline]
|
||||
pub unsafe fn draw(&mut self, vertex_count: u32, instance_count: u32, first_vertex: u32,
|
||||
first_instance: u32)
|
||||
{
|
||||
first_instance: u32) {
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdDraw(cmd, vertex_count, instance_count, first_vertex, first_instance);
|
||||
vk.CmdDraw(cmd,
|
||||
vertex_count,
|
||||
instance_count,
|
||||
first_vertex,
|
||||
first_instance);
|
||||
}
|
||||
|
||||
/// Calls `vkCmdDrawIndexed` on the builder.
|
||||
#[inline]
|
||||
pub unsafe fn draw_indexed(&mut self, index_count: u32, instance_count: u32, first_index: u32,
|
||||
vertex_offset: i32, first_instance: u32)
|
||||
{
|
||||
pub unsafe fn draw_indexed(&mut self, index_count: u32, instance_count: u32,
|
||||
first_index: u32, vertex_offset: i32, first_instance: u32) {
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdDrawIndexed(cmd, index_count, instance_count, first_index, vertex_offset,
|
||||
vk.CmdDrawIndexed(cmd,
|
||||
index_count,
|
||||
instance_count,
|
||||
first_index,
|
||||
vertex_offset,
|
||||
first_instance);
|
||||
}
|
||||
|
||||
@ -627,15 +686,19 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
|
||||
debug_assert!(draw_count == 0 || ((stride % 4) == 0) &&
|
||||
debug_assert!(draw_count == 0 ||
|
||||
((stride % 4) == 0) &&
|
||||
stride as usize >= mem::size_of::<vk::DrawIndirectCommand>());
|
||||
|
||||
let inner = buffer.inner();
|
||||
debug_assert!(inner.offset < buffer.size());
|
||||
debug_assert!(inner.buffer.usage_indirect_buffer());
|
||||
|
||||
vk.CmdDrawIndirect(cmd, inner.buffer.internal_object(), inner.offset as vk::DeviceSize,
|
||||
draw_count, stride);
|
||||
vk.CmdDrawIndirect(cmd,
|
||||
inner.buffer.internal_object(),
|
||||
inner.offset as vk::DeviceSize,
|
||||
draw_count,
|
||||
stride);
|
||||
}
|
||||
|
||||
/// Calls `vkCmdDrawIndexedIndirect` on the builder.
|
||||
@ -650,8 +713,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert!(inner.offset < buffer.size());
|
||||
debug_assert!(inner.buffer.usage_indirect_buffer());
|
||||
|
||||
vk.CmdDrawIndexedIndirect(cmd, inner.buffer.internal_object(),
|
||||
inner.offset as vk::DeviceSize, draw_count, stride);
|
||||
vk.CmdDrawIndexedIndirect(cmd,
|
||||
inner.buffer.internal_object(),
|
||||
inner.offset as vk::DeviceSize,
|
||||
draw_count,
|
||||
stride);
|
||||
}
|
||||
|
||||
/// Calls `vkCmdEndRenderPass` on the builder.
|
||||
@ -688,14 +754,20 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let size = buffer.size();
|
||||
|
||||
let (buffer_handle, offset) = {
|
||||
let BufferInner { buffer: buffer_inner, offset } = buffer.inner();
|
||||
let BufferInner {
|
||||
buffer: buffer_inner,
|
||||
offset,
|
||||
} = buffer.inner();
|
||||
debug_assert!(buffer_inner.usage_transfer_dest());
|
||||
debug_assert_eq!(offset % 4, 0);
|
||||
(buffer_inner.internal_object(), offset)
|
||||
};
|
||||
|
||||
vk.CmdFillBuffer(cmd, buffer_handle, offset as vk::DeviceSize,
|
||||
size as vk::DeviceSize, data);
|
||||
vk.CmdFillBuffer(cmd,
|
||||
buffer_handle,
|
||||
offset as vk::DeviceSize,
|
||||
size as vk::DeviceSize,
|
||||
data);
|
||||
}
|
||||
|
||||
/// Calls `vkCmdNextSubpass` on the builder.
|
||||
@ -705,8 +777,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
|
||||
let contents = if secondary { vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS }
|
||||
else { vk::SUBPASS_CONTENTS_INLINE };
|
||||
let contents = if secondary {
|
||||
vk::SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
|
||||
} else {
|
||||
vk::SUBPASS_CONTENTS_INLINE
|
||||
};
|
||||
vk.CmdNextSubpass(cmd, contents);
|
||||
}
|
||||
|
||||
@ -727,8 +802,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert_ne!(command.src_stage_mask, 0);
|
||||
debug_assert_ne!(command.dst_stage_mask, 0);
|
||||
|
||||
vk.CmdPipelineBarrier(cmd, command.src_stage_mask, command.dst_stage_mask,
|
||||
command.dependency_flags, command.memory_barriers.len() as u32,
|
||||
vk.CmdPipelineBarrier(cmd,
|
||||
command.src_stage_mask,
|
||||
command.dst_stage_mask,
|
||||
command.dependency_flags,
|
||||
command.memory_barriers.len() as u32,
|
||||
command.memory_barriers.as_ptr(),
|
||||
command.buffer_barriers.len() as u32,
|
||||
command.buffer_barriers.as_ptr(),
|
||||
@ -752,8 +830,11 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert_eq!(offset % 4, 0);
|
||||
debug_assert!(mem::size_of_val(data) >= size as usize);
|
||||
|
||||
vk.CmdPushConstants(cmd, pipeline_layout.sys().internal_object(),
|
||||
stages.into(), offset as u32, size as u32,
|
||||
vk.CmdPushConstants(cmd,
|
||||
pipeline_layout.sys().internal_object(),
|
||||
stages.into(),
|
||||
offset as u32,
|
||||
size as u32,
|
||||
data as *const D as *const _);
|
||||
}
|
||||
|
||||
@ -851,7 +932,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
pub unsafe fn set_scissor<I>(&mut self, first_scissor: u32, scissors: I)
|
||||
where I: Iterator<Item = Scissor>
|
||||
{
|
||||
let scissors = scissors.map(|v| v.clone().into()).collect::<SmallVec<[_; 16]>>();
|
||||
let scissors = scissors
|
||||
.map(|v| v.clone().into())
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
if scissors.is_empty() {
|
||||
return;
|
||||
}
|
||||
@ -876,7 +959,9 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
pub unsafe fn set_viewport<I>(&mut self, first_viewport: u32, viewports: I)
|
||||
where I: Iterator<Item = Viewport>
|
||||
{
|
||||
let viewports = viewports.map(|v| v.clone().into()).collect::<SmallVec<[_; 16]>>();
|
||||
let viewports = viewports
|
||||
.map(|v| v.clone().into())
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
if viewports.is_empty() {
|
||||
return;
|
||||
}
|
||||
@ -890,7 +975,10 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
|
||||
let vk = self.device().pointers();
|
||||
let cmd = self.internal_object();
|
||||
vk.CmdSetViewport(cmd, first_viewport, viewports.len() as u32, viewports.as_ptr());
|
||||
vk.CmdSetViewport(cmd,
|
||||
first_viewport,
|
||||
viewports.len() as u32,
|
||||
viewports.as_ptr());
|
||||
}
|
||||
|
||||
/// Calls `vkCmdUpdateBuffer` on the builder.
|
||||
@ -908,13 +996,19 @@ impl<P> UnsafeCommandBufferBuilder<P> {
|
||||
debug_assert!(size <= mem::size_of_val(data));
|
||||
|
||||
let (buffer_handle, offset) = {
|
||||
let BufferInner { buffer: buffer_inner, offset } = buffer.inner();
|
||||
let BufferInner {
|
||||
buffer: buffer_inner,
|
||||
offset,
|
||||
} = buffer.inner();
|
||||
debug_assert!(buffer_inner.usage_transfer_dest());
|
||||
debug_assert_eq!(offset % 4, 0);
|
||||
(buffer_inner.internal_object(), offset)
|
||||
};
|
||||
|
||||
vk.CmdUpdateBuffer(cmd, buffer_handle, offset as vk::DeviceSize, size as vk::DeviceSize,
|
||||
vk.CmdUpdateBuffer(cmd,
|
||||
buffer_handle,
|
||||
offset as vk::DeviceSize,
|
||||
size as vk::DeviceSize,
|
||||
data as *const D as *const _);
|
||||
}
|
||||
}
|
||||
@ -976,9 +1070,7 @@ impl UnsafeCommandBufferBuilderExecuteCommands {
|
||||
/// Builds a new empty list.
|
||||
#[inline]
|
||||
pub fn new() -> UnsafeCommandBufferBuilderExecuteCommands {
|
||||
UnsafeCommandBufferBuilderExecuteCommands {
|
||||
raw_cbs: SmallVec::new(),
|
||||
}
|
||||
UnsafeCommandBufferBuilderExecuteCommands { raw_cbs: SmallVec::new() }
|
||||
}
|
||||
|
||||
/// Adds a command buffer to the list.
|
||||
@ -1002,9 +1094,15 @@ pub struct UnsafeCommandBufferBuilderImageAspect {
|
||||
impl UnsafeCommandBufferBuilderImageAspect {
|
||||
pub(crate) fn to_vk_bits(&self) -> vk::ImageAspectFlagBits {
|
||||
let mut out = 0;
|
||||
if self.color { out |= vk::IMAGE_ASPECT_COLOR_BIT };
|
||||
if self.depth { out |= vk::IMAGE_ASPECT_DEPTH_BIT };
|
||||
if self.stencil { out |= vk::IMAGE_ASPECT_STENCIL_BIT };
|
||||
if self.color {
|
||||
out |= vk::IMAGE_ASPECT_COLOR_BIT
|
||||
};
|
||||
if self.depth {
|
||||
out |= vk::IMAGE_ASPECT_DEPTH_BIT
|
||||
};
|
||||
if self.stencil {
|
||||
out |= vk::IMAGE_ASPECT_STENCIL_BIT
|
||||
};
|
||||
out
|
||||
}
|
||||
}
|
||||
@ -1072,8 +1170,10 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
|
||||
self.dst_stage_mask |= other.dst_stage_mask;
|
||||
self.dependency_flags &= other.dependency_flags;
|
||||
|
||||
self.memory_barriers.extend(other.memory_barriers.into_iter());
|
||||
self.buffer_barriers.extend(other.buffer_barriers.into_iter());
|
||||
self.memory_barriers
|
||||
.extend(other.memory_barriers.into_iter());
|
||||
self.buffer_barriers
|
||||
.extend(other.buffer_barriers.into_iter());
|
||||
self.image_barriers.extend(other.image_barriers.into_iter());
|
||||
}
|
||||
|
||||
@ -1087,9 +1187,8 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
|
||||
/// - There are certain rules regarding the pipeline barriers inside render passes.
|
||||
///
|
||||
#[inline]
|
||||
pub unsafe fn add_execution_dependency(&mut self, source: PipelineStages, dest: PipelineStages,
|
||||
by_region: bool)
|
||||
{
|
||||
pub unsafe fn add_execution_dependency(&mut self, source: PipelineStages,
|
||||
dest: PipelineStages, by_region: bool) {
|
||||
if !by_region {
|
||||
self.dependency_flags = 0;
|
||||
}
|
||||
@ -1113,8 +1212,7 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
|
||||
///
|
||||
pub unsafe fn add_memory_barrier(&mut self, source_stage: PipelineStages,
|
||||
source_access: AccessFlagBits, dest_stage: PipelineStages,
|
||||
dest_access: AccessFlagBits, by_region: bool)
|
||||
{
|
||||
dest_access: AccessFlagBits, by_region: bool) {
|
||||
debug_assert!(source_access.is_compatible_with(&source_stage));
|
||||
debug_assert!(dest_access.is_compatible_with(&dest_stage));
|
||||
|
||||
@ -1143,11 +1241,12 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
|
||||
/// is added.
|
||||
/// - Queue ownership transfers must be correct.
|
||||
///
|
||||
pub unsafe fn add_buffer_memory_barrier<B>
|
||||
(&mut self, buffer: &B, source_stage: PipelineStages,
|
||||
source_access: AccessFlagBits, dest_stage: PipelineStages,
|
||||
pub unsafe fn add_buffer_memory_barrier<B>(&mut self, buffer: &B, source_stage: PipelineStages,
|
||||
source_access: AccessFlagBits,
|
||||
dest_stage: PipelineStages,
|
||||
dest_access: AccessFlagBits, by_region: bool,
|
||||
queue_transfer: Option<(u32, u32)>, offset: usize, size: usize)
|
||||
queue_transfer: Option<(u32, u32)>, offset: usize,
|
||||
size: usize)
|
||||
where B: ?Sized + BufferAccess
|
||||
{
|
||||
debug_assert!(source_access.is_compatible_with(&source_stage));
|
||||
@ -1156,7 +1255,10 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
|
||||
self.add_execution_dependency(source_stage, dest_stage, by_region);
|
||||
|
||||
debug_assert!(size <= buffer.size());
|
||||
let BufferInner { buffer, offset: org_offset } = buffer.inner();
|
||||
let BufferInner {
|
||||
buffer,
|
||||
offset: org_offset,
|
||||
} = buffer.inner();
|
||||
let offset = offset + org_offset;
|
||||
|
||||
let (src_queue, dest_queue) = if let Some((src_queue, dest_queue)) = queue_transfer {
|
||||
@ -1196,10 +1298,12 @@ impl UnsafeCommandBufferBuilderPipelineBarrier {
|
||||
/// - Access flags must be compatible with the image usage flags passed at image creation.
|
||||
///
|
||||
pub unsafe fn add_image_memory_barrier<I>(&mut self, image: &I, mipmaps: Range<u32>,
|
||||
layers: Range<u32>, source_stage: PipelineStages, source_access: AccessFlagBits,
|
||||
dest_stage: PipelineStages, dest_access: AccessFlagBits, by_region: bool,
|
||||
queue_transfer: Option<(u32, u32)>, current_layout: ImageLayout,
|
||||
new_layout: ImageLayout)
|
||||
layers: Range<u32>, source_stage: PipelineStages,
|
||||
source_access: AccessFlagBits,
|
||||
dest_stage: PipelineStages,
|
||||
dest_access: AccessFlagBits, by_region: bool,
|
||||
queue_transfer: Option<(u32, u32)>,
|
||||
current_layout: ImageLayout, new_layout: ImageLayout)
|
||||
where I: ?Sized + ImageAccess
|
||||
{
|
||||
debug_assert!(source_access.is_compatible_with(&source_stage));
|
||||
|
@ -14,6 +14,8 @@ use std::sync::Mutex;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use SafeDeref;
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use command_buffer::submit::SubmitAnyBuilder;
|
||||
use command_buffer::submit::SubmitCommandBufferBuilder;
|
||||
@ -21,18 +23,16 @@ use command_buffer::sys::UnsafeCommandBuffer;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use device::Queue;
|
||||
use image::ImageLayout;
|
||||
use image::ImageAccess;
|
||||
use sync::now;
|
||||
use sync::AccessError;
|
||||
use image::ImageLayout;
|
||||
use sync::AccessCheckError;
|
||||
use sync::AccessError;
|
||||
use sync::AccessFlagBits;
|
||||
use sync::FlushError;
|
||||
use sync::NowFuture;
|
||||
use sync::GpuFuture;
|
||||
use sync::NowFuture;
|
||||
use sync::PipelineStages;
|
||||
use SafeDeref;
|
||||
use VulkanObject;
|
||||
use sync::now;
|
||||
|
||||
pub unsafe trait CommandBuffer: DeviceOwned {
|
||||
/// The command pool of the command buffer.
|
||||
@ -114,9 +114,11 @@ pub unsafe trait CommandBuffer: DeviceOwned {
|
||||
#[inline]
|
||||
fn execute_after<F>(self, future: F, queue: Arc<Queue>)
|
||||
-> Result<CommandBufferExecFuture<F, Self>, CommandBufferExecError>
|
||||
where Self: Sized + 'static, F: GpuFuture
|
||||
where Self: Sized + 'static,
|
||||
F: GpuFuture
|
||||
{
|
||||
assert_eq!(self.device().internal_object(), future.device().internal_object());
|
||||
assert_eq!(self.device().internal_object(),
|
||||
future.device().internal_object());
|
||||
|
||||
self.prepare_submit(&future, &queue)?;
|
||||
|
||||
@ -136,7 +138,8 @@ pub unsafe trait CommandBuffer: DeviceOwned {
|
||||
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>;
|
||||
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
|
||||
queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>;
|
||||
|
||||
// FIXME: lots of other methods
|
||||
@ -153,7 +156,10 @@ pub unsafe trait CommandBufferBuild {
|
||||
fn build(self) -> Result<Self::Out, Self::Err>;
|
||||
}
|
||||
|
||||
unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer {
|
||||
unsafe impl<T> CommandBuffer for T
|
||||
where T: SafeDeref,
|
||||
T::Target: CommandBuffer
|
||||
{
|
||||
type PoolAlloc = <T::Target as CommandBuffer>::PoolAlloc;
|
||||
|
||||
#[inline]
|
||||
@ -162,21 +168,22 @@ unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue) -> Result<(), CommandBufferExecError> {
|
||||
fn prepare_submit(&self, future: &GpuFuture, queue: &Queue)
|
||||
-> Result<(), CommandBufferExecError> {
|
||||
(**self).prepare_submit(future, queue)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
fn check_buffer_access(
|
||||
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
(**self).check_buffer_access(buffer, exclusive, queue)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
|
||||
queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
(**self).check_image_access(image, layout, exclusive, queue)
|
||||
}
|
||||
}
|
||||
@ -184,7 +191,10 @@ unsafe impl<T> CommandBuffer for T where T: SafeDeref, T::Target: CommandBuffer
|
||||
/// Represents a command buffer being executed by the GPU and the moment when the execution
|
||||
/// finishes.
|
||||
#[must_use = "Dropping this object will immediately block the thread until the GPU has finished processing the submission"]
|
||||
pub struct CommandBufferExecFuture<F, Cb> where F: GpuFuture, Cb: CommandBuffer {
|
||||
pub struct CommandBufferExecFuture<F, Cb>
|
||||
where F: GpuFuture,
|
||||
Cb: CommandBuffer
|
||||
{
|
||||
previous: F,
|
||||
command_buffer: Cb,
|
||||
queue: Arc<Queue>,
|
||||
@ -196,7 +206,8 @@ pub struct CommandBufferExecFuture<F, Cb> where F: GpuFuture, Cb: CommandBuffer
|
||||
}
|
||||
|
||||
unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
|
||||
where F: GpuFuture, Cb: CommandBuffer
|
||||
where F: GpuFuture,
|
||||
Cb: CommandBuffer
|
||||
{
|
||||
#[inline]
|
||||
fn cleanup_finished(&mut self) {
|
||||
@ -204,7 +215,7 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
|
||||
}
|
||||
|
||||
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
|
||||
Ok(match try!(self.previous.build_submission()) {
|
||||
Ok(match self.previous.build_submission()? {
|
||||
SubmitAnyBuilder::Empty => {
|
||||
let mut builder = SubmitCommandBufferBuilder::new();
|
||||
builder.add_command_buffer(self.command_buffer.inner());
|
||||
@ -220,7 +231,8 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
|
||||
builder.add_command_buffer(self.command_buffer.inner());
|
||||
SubmitAnyBuilder::CommandBuffer(builder)
|
||||
},
|
||||
SubmitAnyBuilder::QueuePresent(_) | SubmitAnyBuilder::BindSparse(_) => {
|
||||
SubmitAnyBuilder::QueuePresent(_) |
|
||||
SubmitAnyBuilder::BindSparse(_) => {
|
||||
unimplemented!() // TODO:
|
||||
/*present.submit(); // TODO: wrong
|
||||
let mut builder = SubmitCommandBufferBuilder::new();
|
||||
@ -240,10 +252,10 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
|
||||
|
||||
let queue = self.queue.clone();
|
||||
|
||||
match try!(self.build_submission()) {
|
||||
match self.build_submission()? {
|
||||
SubmitAnyBuilder::Empty => {},
|
||||
SubmitAnyBuilder::CommandBuffer(builder) => {
|
||||
try!(builder.submit(&queue));
|
||||
builder.submit(&queue)?;
|
||||
},
|
||||
_ => unreachable!(),
|
||||
};
|
||||
@ -271,10 +283,11 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
match self.command_buffer.check_buffer_access(buffer, exclusive, queue) {
|
||||
fn check_buffer_access(
|
||||
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
match self.command_buffer
|
||||
.check_buffer_access(buffer, exclusive, queue) {
|
||||
Ok(v) => Ok(v),
|
||||
Err(AccessCheckError::Denied(err)) => Err(AccessCheckError::Denied(err)),
|
||||
Err(AccessCheckError::Unknown) => {
|
||||
@ -284,21 +297,24 @@ unsafe impl<F, Cb> GpuFuture for CommandBufferExecFuture<F, Cb>
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
match self.command_buffer.check_image_access(image, layout, exclusive, queue) {
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
|
||||
queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
match self.command_buffer
|
||||
.check_image_access(image, layout, exclusive, queue) {
|
||||
Ok(v) => Ok(v),
|
||||
Err(AccessCheckError::Denied(err)) => Err(AccessCheckError::Denied(err)),
|
||||
Err(AccessCheckError::Unknown) => {
|
||||
self.previous.check_image_access(image, layout, exclusive, queue)
|
||||
self.previous
|
||||
.check_image_access(image, layout, exclusive, queue)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<F, Cb> DeviceOwned for CommandBufferExecFuture<F, Cb>
|
||||
where F: GpuFuture, Cb: CommandBuffer
|
||||
where F: GpuFuture,
|
||||
Cb: CommandBuffer
|
||||
{
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
@ -306,7 +322,10 @@ unsafe impl<F, Cb> DeviceOwned for CommandBufferExecFuture<F, Cb>
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, Cb> Drop for CommandBufferExecFuture<F, Cb> where F: GpuFuture, Cb: CommandBuffer {
|
||||
impl<F, Cb> Drop for CommandBufferExecFuture<F, Cb>
|
||||
where F: GpuFuture,
|
||||
Cb: CommandBuffer
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
if !*self.finished.get_mut() {
|
||||
|
@ -11,10 +11,10 @@ use std::cmp;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use VulkanObject;
|
||||
|
||||
/// Checks whether a copy buffer command is valid.
|
||||
///
|
||||
@ -28,8 +28,10 @@ pub fn check_copy_buffer<S, D>(device: &Device, source: &S, destination: &D)
|
||||
where S: ?Sized + BufferAccess,
|
||||
D: ?Sized + BufferAccess
|
||||
{
|
||||
assert_eq!(source.inner().buffer.device().internal_object(), device.internal_object());
|
||||
assert_eq!(destination.inner().buffer.device().internal_object(), device.internal_object());
|
||||
assert_eq!(source.inner().buffer.device().internal_object(),
|
||||
device.internal_object());
|
||||
assert_eq!(destination.inner().buffer.device().internal_object(),
|
||||
device.internal_object());
|
||||
|
||||
if !source.inner().buffer.usage_transfer_src() {
|
||||
return Err(CheckCopyBufferError::SourceMissingTransferUsage);
|
||||
|
@ -10,10 +10,10 @@
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use VulkanObject;
|
||||
|
||||
/// Checks whether a fill buffer command is valid.
|
||||
///
|
||||
@ -24,7 +24,8 @@ use VulkanObject;
|
||||
pub fn check_fill_buffer<B>(device: &Device, buffer: &B) -> Result<(), CheckFillBufferError>
|
||||
where B: ?Sized + BufferAccess
|
||||
{
|
||||
assert_eq!(buffer.inner().buffer.device().internal_object(), device.internal_object());
|
||||
assert_eq!(buffer.inner().buffer.device().internal_object(),
|
||||
device.internal_object());
|
||||
|
||||
if !buffer.inner().buffer.usage_transfer_dest() {
|
||||
return Err(CheckFillBufferError::BufferMissingUsage);
|
||||
|
@ -9,10 +9,10 @@

//! Functions that check the validity of commands.

pub use self::copy_buffer::{check_copy_buffer, CheckCopyBufferError};
pub use self::dynamic_state::{check_dynamic_state_validity, CheckDynamicStateValidityError};
pub use self::fill_buffer::{check_fill_buffer, CheckFillBufferError};
pub use self::update_buffer::{check_update_buffer, CheckUpdateBufferError};
pub use self::copy_buffer::{CheckCopyBufferError, check_copy_buffer};
pub use self::dynamic_state::{CheckDynamicStateValidityError, check_dynamic_state_validity};
pub use self::fill_buffer::{CheckFillBufferError, check_fill_buffer};
pub use self::update_buffer::{CheckUpdateBufferError, check_update_buffer};

mod copy_buffer;
mod dynamic_state;
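
Each `check_*` function in this validity module follows the same recipe: assert that the resource belongs to the given device, test a usage flag, and return a dedicated error type. An illustrative standalone version with placeholder types; `FakeDevice` and `FakeBuffer` are not vulkano's:

#[derive(Debug, PartialEq)]
enum CheckFillBufferError {
    BufferMissingUsage,
}

struct FakeDevice {
    id: u64,
}

struct FakeBuffer {
    device_id: u64,
    usage_transfer_dest: bool,
}

// Same shape as `check_fill_buffer`: validate, then return `Ok(())` or a typed error.
fn check_fill_buffer(device: &FakeDevice, buffer: &FakeBuffer)
                     -> Result<(), CheckFillBufferError> {
    // The real function asserts that the buffer was created on this device.
    assert_eq!(buffer.device_id, device.id);
    if !buffer.usage_transfer_dest {
        return Err(CheckFillBufferError::BufferMissingUsage);
    }
    Ok(())
}

fn main() {
    let device = FakeDevice { id: 1 };
    let buffer = FakeBuffer { device_id: 1, usage_transfer_dest: false };
    assert_eq!(check_fill_buffer(&device, &buffer),
               Err(CheckFillBufferError::BufferMissingUsage));
}
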
@ -12,10 +12,10 @@ use std::error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use VulkanObject;
|
||||
|
||||
/// Checks whether an update buffer command is valid.
|
||||
///
|
||||
@ -28,7 +28,8 @@ pub fn check_update_buffer<B, D>(device: &Device, buffer: &B, data: &D)
|
||||
where B: ?Sized + BufferAccess,
|
||||
D: ?Sized
|
||||
{
|
||||
assert_eq!(buffer.inner().buffer.device().internal_object(), device.internal_object());
|
||||
assert_eq!(buffer.inner().buffer.device().internal_object(),
|
||||
device.internal_object());
|
||||
|
||||
if !buffer.inner().buffer.usage_transfer_dest() {
|
||||
return Err(CheckUpdateBufferError::BufferMissingUsage);
|
||||
|
@ -41,9 +41,9 @@
//! in a render pass. Can only give access to the same pixel as the one you're processing.
//!

use format::Format;
use std::cmp;
use std::ops::BitOr;
use format::Format;
use vk;

/// Contains the exact description of a single descriptor.
@ -77,9 +77,8 @@ impl DescriptorDesc {
    // TODO: return Result instead of bool
    #[inline]
    pub fn is_superset_of(&self, other: &DescriptorDesc) -> bool {
        self.ty.is_superset_of(&other.ty) &&
        self.array_count >= other.array_count && self.stages.is_superset_of(&other.stages) &&
        (!self.readonly || other.readonly)
        self.ty.is_superset_of(&other.ty) && self.array_count >= other.array_count &&
            self.stages.is_superset_of(&other.stages) && (!self.readonly || other.readonly)
    }

    /// Builds a `DescriptorDesc` that is the union of `self` and `other`, if possible.
@ -89,7 +88,9 @@ impl DescriptorDesc {
    // TODO: add example
    #[inline]
    pub fn union(&self, other: &DescriptorDesc) -> Option<DescriptorDesc> {
        if self.ty != other.ty { return None; }
        if self.ty != other.ty {
            return None;
        }

Some(DescriptorDesc {
|
||||
ty: self.ty.clone(),
|
||||
@ -134,12 +135,18 @@ impl DescriptorDescTy {
|
||||
DescriptorDescTy::Sampler => DescriptorType::Sampler,
|
||||
DescriptorDescTy::CombinedImageSampler(_) => DescriptorType::CombinedImageSampler,
|
||||
DescriptorDescTy::Image(ref desc) => {
|
||||
if desc.sampled { DescriptorType::SampledImage }
|
||||
else { DescriptorType::StorageImage }
|
||||
if desc.sampled {
|
||||
DescriptorType::SampledImage
|
||||
} else {
|
||||
DescriptorType::StorageImage
|
||||
}
|
||||
},
|
||||
DescriptorDescTy::InputAttachment { .. } => DescriptorType::InputAttachment,
|
||||
DescriptorDescTy::Buffer(ref desc) => {
|
||||
let dynamic = match desc.dynamic { Some(d) => d, None => return None };
|
||||
let dynamic = match desc.dynamic {
|
||||
Some(d) => d,
|
||||
None => return None,
|
||||
};
|
||||
match (desc.storage, dynamic) {
|
||||
(false, false) => DescriptorType::UniformBuffer,
|
||||
(true, false) => DescriptorType::StorageBuffer,
|
||||
@ -148,8 +155,11 @@ impl DescriptorDescTy {
|
||||
}
|
||||
},
|
||||
DescriptorDescTy::TexelBuffer { storage, .. } => {
|
||||
if storage { DescriptorType::StorageTexelBuffer }
|
||||
else { DescriptorType::UniformTexelBuffer }
|
||||
if storage {
|
||||
DescriptorType::StorageTexelBuffer
|
||||
} else {
|
||||
DescriptorType::UniformTexelBuffer
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
@ -164,14 +174,17 @@ impl DescriptorDescTy {
(&DescriptorDescTy::CombinedImageSampler(ref me),
&DescriptorDescTy::CombinedImageSampler(ref other)) => me.is_superset_of(other),

(&DescriptorDescTy::Image(ref me),
&DescriptorDescTy::Image(ref other)) => me.is_superset_of(other),
(&DescriptorDescTy::Image(ref me), &DescriptorDescTy::Image(ref other)) =>
me.is_superset_of(other),

(&DescriptorDescTy::InputAttachment { multisampled: me_multisampled,
array_layers: me_array_layers },
&DescriptorDescTy::InputAttachment { multisampled: other_multisampled,
array_layers: other_array_layers }) =>
{
(&DescriptorDescTy::InputAttachment {
multisampled: me_multisampled,
array_layers: me_array_layers,
},
&DescriptorDescTy::InputAttachment {
multisampled: other_multisampled,
array_layers: other_array_layers,
}) => {
me_multisampled == other_multisampled && me_array_layers == other_array_layers
},

@ -188,9 +201,14 @@ impl DescriptorDescTy {
}
},

(&DescriptorDescTy::TexelBuffer { storage: me_storage, format: me_format },
&DescriptorDescTy::TexelBuffer { storage: other_storage, format: other_format }) =>
{
(&DescriptorDescTy::TexelBuffer {
storage: me_storage,
format: me_format,
},
&DescriptorDescTy::TexelBuffer {
storage: other_storage,
format: other_format,
}) => {
if me_storage != other_storage {
return false;
}
@ -204,7 +222,7 @@ impl DescriptorDescTy {
},

// Any other combination is invalid.
_ => false
_ => false,
}
}
}
@ -240,7 +258,9 @@ impl DescriptorImageDesc {
}

match (self.format, other.format) {
(Some(a), Some(b)) => if a != b { return false; },
(Some(a), Some(b)) => if a != b {
return false;
},
(Some(_), None) => (),
(None, None) => (),
(None, Some(_)) => return false,
@ -249,16 +269,17 @@ impl DescriptorImageDesc {
match (self.array_layers, other.array_layers) {
(DescriptorImageDescArray::NonArrayed, DescriptorImageDescArray::NonArrayed) => (),
(DescriptorImageDescArray::Arrayed { max_layers: my_max },
DescriptorImageDescArray::Arrayed { max_layers: other_max }) =>
{
DescriptorImageDescArray::Arrayed { max_layers: other_max }) => {
match (my_max, other_max) {
(Some(m), Some(o)) => if m < o { return false; },
(Some(m), Some(o)) => if m < o {
return false;
},
(Some(_), None) => (),
(None, Some(_)) => return false,
(None, None) => (), // TODO: is this correct?
};
},
_ => return false
_ => return false,
};

true
@ -269,7 +290,7 @@ impl DescriptorImageDesc {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum DescriptorImageDescArray {
NonArrayed,
Arrayed { max_layers: Option<u32> }
Arrayed { max_layers: Option<u32> },
}

// TODO: documentation
@ -294,11 +315,10 @@ pub struct DescriptorBufferDesc {
pub enum DescriptorBufferContentDesc {
F32,
F64,
Struct {

},
Struct {},
Array {
len: Box<DescriptorBufferContentDesc>, num_array: usize
len: Box<DescriptorBufferContentDesc>,
num_array: usize,
},
}

@ -403,8 +423,7 @@ impl ShaderStages {
(self.vertex || !other.vertex) &&
(self.tessellation_control || !other.tessellation_control) &&
(self.tessellation_evaluation || !other.tessellation_evaluation) &&
(self.geometry || !other.geometry) &&
(self.fragment || !other.fragment) &&
(self.geometry || !other.geometry) && (self.fragment || !other.fragment) &&
(self.compute || !other.compute)
}

@ -415,8 +434,7 @@ impl ShaderStages {
(self.vertex && other.vertex) ||
(self.tessellation_control && other.tessellation_control) ||
(self.tessellation_evaluation && other.tessellation_evaluation) ||
(self.geometry && other.geometry) ||
(self.fragment && other.fragment) ||
(self.geometry && other.geometry) || (self.fragment && other.fragment) ||
(self.compute && other.compute)
}
}
@ -442,12 +460,24 @@ impl Into<vk::ShaderStageFlags> for ShaderStages {
#[inline]
fn into(self) -> vk::ShaderStageFlags {
let mut result = 0;
if self.vertex { result |= vk::SHADER_STAGE_VERTEX_BIT; }
if self.tessellation_control { result |= vk::SHADER_STAGE_TESSELLATION_CONTROL_BIT; }
if self.tessellation_evaluation { result |= vk::SHADER_STAGE_TESSELLATION_EVALUATION_BIT; }
if self.geometry { result |= vk::SHADER_STAGE_GEOMETRY_BIT; }
if self.fragment { result |= vk::SHADER_STAGE_FRAGMENT_BIT; }
if self.compute { result |= vk::SHADER_STAGE_COMPUTE_BIT; }
if self.vertex {
result |= vk::SHADER_STAGE_VERTEX_BIT;
}
if self.tessellation_control {
result |= vk::SHADER_STAGE_TESSELLATION_CONTROL_BIT;
}
if self.tessellation_evaluation {
result |= vk::SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
}
if self.geometry {
result |= vk::SHADER_STAGE_GEOMETRY_BIT;
}
if self.fragment {
result |= vk::SHADER_STAGE_FRAGMENT_BIT;
}
if self.compute {
result |= vk::SHADER_STAGE_COMPUTE_BIT;
}
result
}
}

@ -7,12 +7,12 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use std::iter;
use buffer::BufferAccess;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use image::ImageAccess;
use std::iter;

/// A collection of descriptor set objects.
pub unsafe trait DescriptorSetsCollection {
@ -74,7 +74,7 @@ unsafe impl<T> DescriptorSetsCollection for T
fn num_bindings_in_set(&self, set: usize) -> Option<usize> {
match set {
0 => Some(self.num_bindings()),
_ => None
_ => None,
}
}

@ -82,7 +82,7 @@ unsafe impl<T> DescriptorSetsCollection for T
fn descriptor(&self, set: usize, binding: usize) -> Option<DescriptorDesc> {
match set {
0 => self.descriptor(binding),
_ => None
_ => None,
}
}

@ -185,4 +185,29 @@ macro_rules! impl_collection {
($i:ident) => ();
}

impl_collection!(Z, Y, X, W, V, U, T, S, R, Q, P, O, N, M, L, K, J, I, H, G, F, E, D, C, B, A);
impl_collection!(Z,
Y,
X,
W,
V,
U,
T,
S,
R,
Q,
P,
O,
N,
M,
L,
K,
J,
I,
H,
G,
F,
E,
D,
C,
B,
A);

@ -35,15 +35,15 @@
//! - The `DescriptorSetsCollection` trait is implemented on collections of types that implement
//! `DescriptorSet`. It is what you pass to the draw functions.

use SafeDeref;
use buffer::BufferAccess;
use descriptor::descriptor::DescriptorDesc;
use image::ImageAccess;
use SafeDeref;

pub use self::collection::DescriptorSetsCollection;
pub use self::simple::*;
pub use self::std_pool::StdDescriptorPool;
pub use self::std_pool::StdDescriptorPoolAlloc;
pub use self::simple::*;
pub use self::sys::DescriptorPool;
pub use self::sys::DescriptorPoolAlloc;
pub use self::sys::DescriptorPoolAllocError;
@ -77,7 +77,10 @@ pub unsafe trait DescriptorSet: DescriptorSetDesc {
fn images_list<'a>(&'a self) -> Box<Iterator<Item = &'a ImageAccess> + 'a>;
}

unsafe impl<T> DescriptorSet for T where T: SafeDeref, T::Target: DescriptorSet {
unsafe impl<T> DescriptorSet for T
where T: SafeDeref,
T::Target: DescriptorSet
{
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
(**self).inner()
@ -103,7 +106,10 @@ pub unsafe trait DescriptorSetDesc {
fn descriptor(&self, binding: usize) -> Option<DescriptorDesc>;
}

unsafe impl<T> DescriptorSetDesc for T where T: SafeDeref, T::Target: DescriptorSetDesc {
unsafe impl<T> DescriptorSetDesc for T
where T: SafeDeref,
T::Target: DescriptorSetDesc
{
#[inline]
fn num_bindings(&self) -> usize {
(**self).num_bindings()

@ -13,14 +13,14 @@ use buffer::BufferAccess;
use buffer::BufferViewRef;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorType;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::DescriptorSet;
use descriptor::descriptor_set::DescriptorSetDesc;
use descriptor::descriptor_set::DescriptorWrite;
use descriptor::descriptor_set::StdDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;
@ -47,13 +47,17 @@ use sync::PipelineStages;
///
/// # Example
// TODO:
pub struct SimpleDescriptorSet<R, P = Arc<StdDescriptorPool>> where P: DescriptorPool {
pub struct SimpleDescriptorSet<R, P = Arc<StdDescriptorPool>>
where P: DescriptorPool
{
inner: P::Alloc,
resources: R,
layout: Arc<UnsafeDescriptorSetLayout>
layout: Arc<UnsafeDescriptorSetLayout>,
}

impl<R, P> SimpleDescriptorSet<R, P> where P: DescriptorPool {
impl<R, P> SimpleDescriptorSet<R, P>
where P: DescriptorPool
{
/// Returns the layout used to create this descriptor set.
#[inline]
pub fn set_layout(&self) -> &Arc<UnsafeDescriptorSetLayout> {
@ -61,7 +65,9 @@ impl<R, P> SimpleDescriptorSet<R, P> where P: DescriptorPool {
}
}

unsafe impl<R, P> DescriptorSet for SimpleDescriptorSet<R, P> where P: DescriptorPool {
unsafe impl<R, P> DescriptorSet for SimpleDescriptorSet<R, P>
where P: DescriptorPool
{
#[inline]
fn inner(&self) -> &UnsafeDescriptorSet {
self.inner.inner()
@ -78,7 +84,9 @@ unsafe impl<R, P> DescriptorSet for SimpleDescriptorSet<R, P> where P: Descripto
}
}

unsafe impl<R, P> DescriptorSetDesc for SimpleDescriptorSet<R, P> where P: DescriptorPool {
unsafe impl<R, P> DescriptorSetDesc for SimpleDescriptorSet<R, P>
where P: DescriptorPool
{
#[inline]
fn num_bindings(&self) -> usize {
unimplemented!() // FIXME:
@ -145,7 +153,9 @@ pub struct SimpleDescriptorSetBuilder<L, R> {
resources: R,
}

impl<L> SimpleDescriptorSetBuilder<L, ()> where L: PipelineLayoutAbstract {
impl<L> SimpleDescriptorSetBuilder<L, ()>
where L: PipelineLayoutAbstract
{
/// Builds a new prototype for a `SimpleDescriptorSet`. Requires a reference to a pipeline
/// layout, and the id of the set within the layout.
///
@ -167,16 +177,22 @@ impl<L> SimpleDescriptorSetBuilder<L, ()> where L: PipelineLayoutAbstract {
}
}

impl<L, R> SimpleDescriptorSetBuilder<L, R> where L: PipelineLayoutAbstract {
impl<L, R> SimpleDescriptorSetBuilder<L, R>
where L: PipelineLayoutAbstract
{
/// Builds a `SimpleDescriptorSet` from the builder.
pub fn build(self) -> SimpleDescriptorSet<R, Arc<StdDescriptorPool>> {
// TODO: check that we filled everything
let pool = Device::standard_descriptor_pool(self.layout.device());
let set_layout = self.layout.descriptor_set_layout(self.set_id).unwrap().clone(); // FIXME: error
let set_layout = self.layout
.descriptor_set_layout(self.set_id)
.unwrap()
.clone(); // FIXME: error

let set = unsafe {
let mut set = pool.alloc(&set_layout).unwrap(); // FIXME: error
set.inner_mut().write(pool.device(), self.writes.into_iter());
set.inner_mut()
.write(pool.device(), self.writes.into_iter());
set
};

@ -200,13 +216,13 @@ pub unsafe trait SimpleDescriptorSetBufferExt<L, R> {
}

unsafe impl<L, R, T> SimpleDescriptorSetBufferExt<L, R> for T
where T: BufferAccess, L: PipelineLayoutAbstract
where T: BufferAccess,
L: PipelineLayoutAbstract
{
type Out = (R, SimpleDescriptorSetBuf<T>);

fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
-> SimpleDescriptorSetBuilder<L, Self::Out>
{
-> SimpleDescriptorSetBuilder<L, Self::Out> {
let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
assert_eq!(set_id, i.set_id); // TODO: Result instead
let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
@ -219,19 +235,20 @@ unsafe impl<L, R, T> SimpleDescriptorSetBufferExt<L, R> for T
DescriptorType::StorageBuffer => unsafe {
DescriptorWrite::storage_buffer(binding_id as u32, 0, &self)
},
_ => panic!()
_ => panic!(),
});

SimpleDescriptorSetBuilder {
layout: i.layout,
set_id: i.set_id,
writes: i.writes,
resources: (i.resources, SimpleDescriptorSetBuf {
resources: (i.resources,
SimpleDescriptorSetBuf {
buffer: self,
write: !desc.readonly,
stage: PipelineStages::none(), // FIXME:
access: AccessFlagBits::none(), // FIXME:
})
}),
}
}
}
@ -248,13 +265,13 @@ pub unsafe trait SimpleDescriptorSetImageExt<L, R> {
}

unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for T
where T: ImageViewAccess, L: PipelineLayoutAbstract
where T: ImageViewAccess,
L: PipelineLayoutAbstract
{
type Out = (R, SimpleDescriptorSetImg<T>);

fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
-> SimpleDescriptorSetBuilder<L, Self::Out>
{
-> SimpleDescriptorSetBuilder<L, Self::Out> {
let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
assert_eq!(set_id, i.set_id); // TODO: Result instead
let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
@ -270,14 +287,15 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for T
DescriptorType::InputAttachment => {
DescriptorWrite::input_attachment(binding_id as u32, 0, &self)
},
_ => panic!()
_ => panic!(),
});

SimpleDescriptorSetBuilder {
layout: i.layout,
set_id: i.set_id,
writes: i.writes,
resources: (i.resources, SimpleDescriptorSetImg {
resources: (i.resources,
SimpleDescriptorSetImg {
image: self,
sampler: None,
write: !desc.readonly,
@ -288,19 +306,19 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for T
layout: ImageLayout::General, // FIXME:
stage: PipelineStages::none(), // FIXME:
access: AccessFlagBits::none(), // FIXME:
})
}),
}
}
}

unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for (T, Arc<Sampler>)
where T: ImageViewAccess, L: PipelineLayoutAbstract
where T: ImageViewAccess,
L: PipelineLayoutAbstract
{
type Out = (R, SimpleDescriptorSetImg<T>);

fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
-> SimpleDescriptorSetBuilder<L, Self::Out>
{
-> SimpleDescriptorSetBuilder<L, Self::Out> {
let image_view = self.0;

let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
@ -310,16 +328,20 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for (T, Arc<Sampler>)
assert!(desc.array_count == 1); // not implemented
i.writes.push(match desc.ty.ty().unwrap() {
DescriptorType::CombinedImageSampler => {
DescriptorWrite::combined_image_sampler(binding_id as u32, 0, &self.1, &image_view)
DescriptorWrite::combined_image_sampler(binding_id as u32,
0,
&self.1,
&image_view)
},
_ => panic!()
_ => panic!(),
});

SimpleDescriptorSetBuilder {
layout: i.layout,
set_id: i.set_id,
writes: i.writes,
resources: (i.resources, SimpleDescriptorSetImg {
resources: (i.resources,
SimpleDescriptorSetImg {
image: image_view,
sampler: Some(self.1),
write: !desc.readonly,
@ -330,20 +352,20 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for (T, Arc<Sampler>)
layout: ImageLayout::General, // FIXME:
stage: PipelineStages::none(), // FIXME:
access: AccessFlagBits::none(), // FIXME:
})
}),
}
}
}

// TODO: DRY
unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for Vec<(T, Arc<Sampler>)>
where T: ImageViewAccess, L: PipelineLayoutAbstract
where T: ImageViewAccess,
L: PipelineLayoutAbstract
{
type Out = (R, Vec<SimpleDescriptorSetImg<T>>);

fn add_me(self, mut i: SimpleDescriptorSetBuilder<L, R>, name: &str)
-> SimpleDescriptorSetBuilder<L, Self::Out>
{
-> SimpleDescriptorSetBuilder<L, Self::Out> {
let (set_id, binding_id) = i.layout.descriptor_by_name(name).unwrap(); // TODO: Result instead
assert_eq!(set_id, i.set_id); // TODO: Result instead
let desc = i.layout.descriptor(set_id, binding_id).unwrap(); // TODO: Result instead
@ -354,10 +376,12 @@ unsafe impl<L, R, T> SimpleDescriptorSetImageExt<L, R> for Vec<(T, Arc<Sampler>)
for (num, (img, sampler)) in self.into_iter().enumerate() {
i.writes.push(match desc.ty.ty().unwrap() {
DescriptorType::CombinedImageSampler => {
DescriptorWrite::combined_image_sampler(binding_id as u32, num as u32,
&sampler, &img)
DescriptorWrite::combined_image_sampler(binding_id as u32,
num as u32,
&sampler,
&img)
},
_ => panic!()
_ => panic!(),
});

imgs.push(SimpleDescriptorSetImg {
@ -428,7 +452,9 @@ pub struct SimpleDescriptorSetBuf<B> {
}*/

/// Internal object related to the `SimpleDescriptorSet` system.
pub struct SimpleDescriptorSetBufView<V> where V: BufferViewRef {
pub struct SimpleDescriptorSetBufView<V>
where V: BufferViewRef
{
view: V,
write: bool,
stage: PipelineStages,

@ -10,16 +10,16 @@
use std::sync::Arc;
use std::sync::Mutex;

use device::Device;
use device::DeviceOwned;
use descriptor::descriptor_set::DescriptorsCount;
use OomError;
use descriptor::descriptor_set::DescriptorPool;
use descriptor::descriptor_set::DescriptorPoolAlloc;
use descriptor::descriptor_set::DescriptorPoolAllocError;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSet;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use OomError;
use device::Device;
use device::DeviceOwned;

/// Standard implementation of a descriptor pool.
///
@ -61,8 +61,7 @@ unsafe impl DescriptorPool for Arc<StdDescriptorPool> {

// TODO: eventually use a lock-free algorithm?
fn alloc(&self, layout: &UnsafeDescriptorSetLayout)
-> Result<StdDescriptorPoolAlloc, OomError>
{
-> Result<StdDescriptorPoolAlloc, OomError> {
let mut pools = self.pools.lock().unwrap();

// Try find an existing pool with some free space.
@ -105,7 +104,7 @@ unsafe impl DescriptorPool for Arc<StdDescriptorPool> {
let count = layout.descriptors_count().clone() * 40;
// Failure to allocate a new pool results in an error for the whole function because
// there's no way we can recover from that.
let mut new_pool = try!(UnsafeDescriptorPool::new(self.device.clone(), &count, 40, true));
let mut new_pool = UnsafeDescriptorPool::new(self.device.clone(), &count, 40, true)?;

let alloc = unsafe {
match new_pool.alloc(Some(layout)) {
@ -125,7 +124,8 @@ unsafe impl DescriptorPool for Arc<StdDescriptorPool> {

let pool_obj = Arc::new(Mutex::new(Pool {
pool: new_pool,
remaining_capacity: count - *layout.descriptors_count(),
remaining_capacity: count -
*layout.descriptors_count(),
remaining_sets_count: 40 - 1,
}));

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use smallvec::SmallVec;
use std::cmp;
use std::error;
use std::fmt;
@ -15,7 +16,6 @@ use std::ops;
use std::ptr;
use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use smallvec::SmallVec;

use buffer::BufferAccess;
use buffer::BufferInner;
@ -27,9 +27,9 @@ use device::DeviceOwned;
use image::ImageViewAccess;
use sampler::Sampler;

use check_errors;
use OomError;
use VulkanObject;
use check_errors;
use vk;

/// A pool from which descriptor sets can be allocated.
@ -238,8 +238,8 @@ impl UnsafeDescriptorPool {
/// - Panics if `max_sets` is 0.
///
pub fn new(device: Arc<Device>, count: &DescriptorsCount, max_sets: u32,
free_descriptor_set_bit: bool) -> Result<UnsafeDescriptorPool, OomError>
{
free_descriptor_set_bit: bool)
-> Result<UnsafeDescriptorPool, OomError> {
let vk = device.pointers();

assert_ne!(max_sets, 0, "The maximum number of sets can't be 0");
@ -259,17 +259,23 @@ impl UnsafeDescriptorPool {

elem!(uniform_buffer, vk::DESCRIPTOR_TYPE_UNIFORM_BUFFER);
elem!(storage_buffer, vk::DESCRIPTOR_TYPE_STORAGE_BUFFER);
elem!(uniform_buffer_dynamic, vk::DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
elem!(storage_buffer_dynamic, vk::DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
elem!(uniform_texel_buffer, vk::DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
elem!(storage_texel_buffer, vk::DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
elem!(uniform_buffer_dynamic,
vk::DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
elem!(storage_buffer_dynamic,
vk::DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
elem!(uniform_texel_buffer,
vk::DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
elem!(storage_texel_buffer,
vk::DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
elem!(sampled_image, vk::DESCRIPTOR_TYPE_SAMPLED_IMAGE);
elem!(storage_image, vk::DESCRIPTOR_TYPE_STORAGE_IMAGE);
elem!(sampler, vk::DESCRIPTOR_TYPE_SAMPLER);
elem!(combined_image_sampler, vk::DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
elem!(combined_image_sampler,
vk::DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
elem!(input_attachment, vk::DESCRIPTOR_TYPE_INPUT_ATTACHMENT);

assert!(!pool_sizes.is_empty(), "All the descriptors count of a pool are 0");
assert!(!pool_sizes.is_empty(),
"All the descriptors count of a pool are 0");

let pool = unsafe {
let infos = vk::DescriptorPoolCreateInfo {
@ -286,8 +292,10 @@ impl UnsafeDescriptorPool {
};

let mut output = mem::uninitialized();
try!(check_errors(vk.CreateDescriptorPool(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateDescriptorPool(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -321,19 +329,23 @@ impl UnsafeDescriptorPool {
-> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError>
where I: IntoIterator<Item = &'l UnsafeDescriptorSetLayout>
{
let layouts: SmallVec<[_; 8]> = layouts.into_iter().map(|l| {
assert_eq!(self.device.internal_object(), l.device().internal_object(),
"Tried to allocate from a pool with a set layout of a different device");
let layouts: SmallVec<[_; 8]> = layouts
.into_iter()
.map(|l| {
assert_eq!(self.device.internal_object(),
l.device().internal_object(),
"Tried to allocate from a pool with a set layout of a different \
device");
l.internal_object()
}).collect();
})
.collect();

self.alloc_impl(&layouts)
}

// Actual implementation of `alloc`. Separated so that it is not inlined.
unsafe fn alloc_impl(&mut self, layouts: &SmallVec<[vk::DescriptorSetLayout; 8]>)
-> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError>
{
-> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError> {
let num = layouts.len();

if num == 0 {
@ -351,8 +363,8 @@ impl UnsafeDescriptorPool {
let mut output = Vec::with_capacity(num);

let vk = self.device.pointers();
let ret = vk.AllocateDescriptorSets(self.device.internal_object(), &infos,
output.as_mut_ptr());
let ret =
vk.AllocateDescriptorSets(self.device.internal_object(), &infos, output.as_mut_ptr());

// According to the specs, because `VK_ERROR_FRAGMENTED_POOL` was added after version
// 1.0 of Vulkan, any negative return value except out-of-memory errors must be
@ -370,14 +382,12 @@ impl UnsafeDescriptorPool {
c if (c as i32) < 0 => {
return Err(DescriptorPoolAllocError::FragmentedPool);
},
_ => ()
_ => (),
};

output.set_len(num);

Ok(UnsafeDescriptorPoolAllocIter {
sets: output.into_iter(),
})
Ok(UnsafeDescriptorPoolAllocIter { sets: output.into_iter() })
}

/// Frees some descriptor sets.
@ -406,11 +416,12 @@ impl UnsafeDescriptorPool {

// Actual implementation of `free`. Separated so that it is not inlined.
unsafe fn free_impl(&mut self, sets: &SmallVec<[vk::DescriptorSet; 8]>)
-> Result<(), OomError>
{
-> Result<(), OomError> {
let vk = self.device.pointers();
try!(check_errors(vk.FreeDescriptorSets(self.device.internal_object(), self.pool,
sets.len() as u32, sets.as_ptr())));
check_errors(vk.FreeDescriptorSets(self.device.internal_object(),
self.pool,
sets.len() as u32,
sets.as_ptr()))?;
Ok(())
}

@ -419,8 +430,9 @@ impl UnsafeDescriptorPool {
/// This destroys all descriptor sets and empties the pool.
pub unsafe fn reset(&mut self) -> Result<(), OomError> {
let vk = self.device.pointers();
try!(check_errors(vk.ResetDescriptorPool(self.device.internal_object(), self.pool,
0 /* reserved flags */)));
check_errors(vk.ResetDescriptorPool(self.device.internal_object(),
self.pool,
0 /* reserved flags */))?;
Ok(())
}
}
@ -479,7 +491,7 @@ impl error::Error for DescriptorPoolAllocError {
},
DescriptorPoolAllocError::OutOfPoolMemory => {
"there is no more space available in the descriptor pool"
}
},
}
}
}
@ -502,9 +514,7 @@ impl Iterator for UnsafeDescriptorPoolAllocIter {

#[inline]
fn next(&mut self) -> Option<UnsafeDescriptorSet> {
self.sets.next().map(|s| UnsafeDescriptorSet {
set: s,
})
self.sets.next().map(|s| UnsafeDescriptorSet { set: s })
}

#[inline]
@ -685,25 +695,28 @@ impl UnsafeDescriptorSet {
for (i, write) in raw_writes.iter_mut().enumerate() {
write.pImageInfo = match raw_writes_img_infos[i] {
Some(off) => image_descriptors.as_ptr().offset(off as isize),
None => ptr::null()
None => ptr::null(),
};

write.pBufferInfo = match raw_writes_buf_infos[i] {
Some(off) => buffer_descriptors.as_ptr().offset(off as isize),
None => ptr::null()
None => ptr::null(),
};

write.pTexelBufferView = match raw_writes_buf_view_infos[i] {
Some(off) => buffer_views_descriptors.as_ptr().offset(off as isize),
None => ptr::null()
None => ptr::null(),
};
}

// It is forbidden to call `vkUpdateDescriptorSets` with 0 writes, so we need to perform
// this emptiness check.
if !raw_writes.is_empty() {
vk.UpdateDescriptorSets(device.internal_object(), raw_writes.len() as u32,
raw_writes.as_ptr(), 0, ptr::null());
vk.UpdateDescriptorSets(device.internal_object(),
raw_writes.len() as u32,
raw_writes.as_ptr(),
0,
ptr::null());
}
}
}
@ -762,7 +775,8 @@ impl DescriptorWrite {
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_storage_image_layout() as u32;
DescriptorWriteInner::StorageImage(image.inner().internal_object(), layout)
DescriptorWriteInner::StorageImage(image.inner().internal_object(),
layout)
}),
}
}
@ -772,7 +786,7 @@ impl DescriptorWrite {
DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!(DescriptorWriteInner::Sampler(sampler.internal_object()))
inner: smallvec!(DescriptorWriteInner::Sampler(sampler.internal_object())),
}
}

@ -785,29 +799,40 @@ impl DescriptorWrite {
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_sampled_image_layout() as u32;
DescriptorWriteInner::SampledImage(image.inner().internal_object(), layout)
DescriptorWriteInner::SampledImage(image.inner().internal_object(),
layout)
}),
}
}

#[inline]
pub fn combined_image_sampler<I>(binding: u32, array_element: u32, sampler: &Arc<Sampler>, image: &I) -> DescriptorWrite
pub fn combined_image_sampler<I>(binding: u32, array_element: u32, sampler: &Arc<Sampler>,
image: &I)
-> DescriptorWrite
where I: ImageViewAccess
{
DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_combined_image_sampler_layout() as u32;
DescriptorWriteInner::CombinedImageSampler(sampler.internal_object(), image.inner().internal_object(), layout)
let layout =
image.descriptor_set_combined_image_sampler_layout() as u32;
DescriptorWriteInner::CombinedImageSampler(sampler
.internal_object(),
image
.inner()
.internal_object(),
layout)
}),
}
}

#[inline]
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &Arc<BufferView<F, B>>) -> DescriptorWrite
pub fn uniform_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
view: &Arc<BufferView<F, B>>)
-> DescriptorWrite
where B: BufferAccess,
F: 'static + Send + Sync,
F: 'static + Send + Sync
{
assert!(view.uniform_texel_buffer());

@ -819,9 +844,11 @@ impl DescriptorWrite {
}

#[inline]
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32, view: &Arc<BufferView<F, B>>) -> DescriptorWrite
pub fn storage_texel_buffer<'a, F, B>(binding: u32, array_element: u32,
view: &Arc<BufferView<F, B>>)
-> DescriptorWrite
where B: BufferAccess + 'static,
F: 'static + Send + Sync,
F: 'static + Send + Sync
{
assert!(view.storage_texel_buffer());

@ -843,7 +870,9 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
DescriptorWriteInner::UniformBuffer(buffer.internal_object(), offset, size)
DescriptorWriteInner::UniformBuffer(buffer.internal_object(),
offset,
size)
}),
}
}
@ -859,13 +888,16 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!({
DescriptorWriteInner::StorageBuffer(buffer.internal_object(), offset, size)
DescriptorWriteInner::StorageBuffer(buffer.internal_object(),
offset,
size)
}),
}
}

#[inline]
pub unsafe fn dynamic_uniform_buffer<B>(binding: u32, array_element: u32, buffer: &B) -> DescriptorWrite
pub unsafe fn dynamic_uniform_buffer<B>(binding: u32, array_element: u32, buffer: &B)
-> DescriptorWrite
where B: BufferAccess
{
let size = buffer.size();
@ -875,12 +907,14 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!(DescriptorWriteInner::DynamicUniformBuffer(buffer.internal_object(),
offset, size)),
offset,
size)),
}
}

#[inline]
pub unsafe fn dynamic_storage_buffer<B>(binding: u32, array_element: u32, buffer: &B) -> DescriptorWrite
pub unsafe fn dynamic_storage_buffer<B>(binding: u32, array_element: u32, buffer: &B)
-> DescriptorWrite
where B: BufferAccess
{
let size = buffer.size();
@ -890,7 +924,8 @@ impl DescriptorWrite {
binding: binding,
first_array_element: array_element,
inner: smallvec!(DescriptorWriteInner::DynamicStorageBuffer(buffer.internal_object(),
offset, size)),
offset,
size)),
}
}

@ -903,7 +938,10 @@ impl DescriptorWrite {
first_array_element: array_element,
inner: smallvec!({
let layout = image.descriptor_set_input_attachment_layout() as u32;
DescriptorWriteInner::InputAttachment(image.inner().internal_object(), layout)
DescriptorWriteInner::InputAttachment(image
.inner()
.internal_object(),
layout)
}),
}
}
@ -913,15 +951,18 @@ impl DescriptorWrite {
pub fn ty(&self) -> DescriptorType {
match self.inner[0] {
DescriptorWriteInner::Sampler(_) => DescriptorType::Sampler,
DescriptorWriteInner::CombinedImageSampler(_, _, _) => DescriptorType::CombinedImageSampler,
DescriptorWriteInner::CombinedImageSampler(_, _, _) =>
DescriptorType::CombinedImageSampler,
DescriptorWriteInner::SampledImage(_, _) => DescriptorType::SampledImage,
DescriptorWriteInner::StorageImage(_, _) => DescriptorType::StorageImage,
DescriptorWriteInner::UniformTexelBuffer(_) => DescriptorType::UniformTexelBuffer,
DescriptorWriteInner::StorageTexelBuffer(_) => DescriptorType::StorageTexelBuffer,
DescriptorWriteInner::UniformBuffer(_, _, _) => DescriptorType::UniformBuffer,
DescriptorWriteInner::StorageBuffer(_, _, _) => DescriptorType::StorageBuffer,
DescriptorWriteInner::DynamicUniformBuffer(_, _, _) => DescriptorType::UniformBufferDynamic,
DescriptorWriteInner::DynamicStorageBuffer(_, _, _) => DescriptorType::StorageBufferDynamic,
DescriptorWriteInner::DynamicUniformBuffer(_, _, _) =>
DescriptorType::UniformBufferDynamic,
DescriptorWriteInner::DynamicStorageBuffer(_, _, _) =>
DescriptorType::StorageBufferDynamic,
DescriptorWriteInner::InputAttachment(_, _) => DescriptorType::InputAttachment,
}
}
@ -929,15 +970,15 @@ impl DescriptorWrite {

#[cfg(test)]
mod tests {
use std::iter;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::UnsafeDescriptorPool;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use std::iter;

#[test]
fn pool_create() {
@ -984,7 +1025,8 @@ mod tests {
readonly: true,
};

let set_layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(layout))).unwrap();
let set_layout = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(layout)))
.unwrap();

let desc = DescriptorsCount {
uniform_buffer: 10,

@ -7,15 +7,15 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use smallvec::SmallVec;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;

use check_errors;
use OomError;
use VulkanObject;
use check_errors;
use vk;

use descriptor::descriptor::DescriptorDesc;
@ -49,10 +49,13 @@ impl UnsafeDescriptorSetLayout {
{
let mut descriptors_count = DescriptorsCount::zero();

let bindings = descriptors.into_iter().enumerate().filter_map(|(binding, desc)| {
let bindings = descriptors
.into_iter()
.enumerate()
.filter_map(|(binding, desc)| {
let desc = match desc {
Some(d) => d,
None => return None
None => return None,
};

// FIXME: it is not legal to pass eg. the TESSELLATION_SHADER bit when the device
@ -68,7 +71,8 @@ impl UnsafeDescriptorSetLayout {
stageFlags: desc.stages.into(),
pImmutableSamplers: ptr::null(), // FIXME: not yet implemented
})
}).collect::<SmallVec<[_; 32]>>();
})
.collect::<SmallVec<[_; 32]>>();

// Note that it seems legal to have no descriptor at all in the set.

@ -83,8 +87,10 @@ impl UnsafeDescriptorSetLayout {

let mut output = mem::uninitialized();
let vk = device.pointers();
try!(check_errors(vk.CreateDescriptorSetLayout(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateDescriptorSetLayout(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -132,22 +138,21 @@ impl Drop for UnsafeDescriptorSetLayout {
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
vk.DestroyDescriptorSetLayout(self.device.internal_object(), self.layout,
ptr::null());
vk.DestroyDescriptorSetLayout(self.device.internal_object(), self.layout, ptr::null());
}
}
}

#[cfg(test)]
mod tests {
use std::iter;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::DescriptorDescTy;
use descriptor::descriptor::DescriptorBufferDesc;
use descriptor::descriptor::DescriptorBufferContentDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorsCount;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use std::iter;

#[test]
fn empty() {
@ -172,7 +177,8 @@ mod tests {

let sl = UnsafeDescriptorSetLayout::new(device.clone(), iter::once(Some(layout))).unwrap();

assert_eq!(sl.descriptors_count(), &DescriptorsCount {
assert_eq!(sl.descriptors_count(),
&DescriptorsCount {
uniform_buffer: 1,
..DescriptorsCount::zero()
});

@ -57,10 +57,10 @@ pub use self::traits::PipelineLayoutAbstract;
pub use self::traits::PipelineLayoutDesc;
pub use self::traits::PipelineLayoutDescNames;
pub use self::traits::PipelineLayoutDescPcRange;
pub use self::traits::PipelineLayoutSuperset;
pub use self::traits::PipelineLayoutNotSupersetError;
pub use self::traits::PipelineLayoutSetsCompatible;
pub use self::traits::PipelineLayoutPushConstantsCompatible;
pub use self::traits::PipelineLayoutSetsCompatible;
pub use self::traits::PipelineLayoutSuperset;
pub use self::union::PipelineLayoutDescUnion;

mod empty;

@ -7,26 +7,26 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::mem;
use std::ptr;
use std::sync::Arc;
use smallvec::SmallVec;

use check_errors;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use vk;

use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use device::Device;
use device::DeviceOwned;

@ -39,7 +39,9 @@ pub struct PipelineLayout<L> {
desc: L,
}

impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
impl<L> PipelineLayout<L>
where L: PipelineLayoutDesc
{
/// Creates a new `PipelineLayout`.
///
/// # Panic
@ -48,8 +50,7 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
/// device than the one passed as parameter.
#[inline]
pub fn new(device: Arc<Device>, desc: L)
-> Result<PipelineLayout<L>, PipelineLayoutCreationError>
{
-> Result<PipelineLayout<L>, PipelineLayoutCreationError> {
let vk = device.pointers();
let limits = device.physical_device().limits();

@ -59,13 +60,16 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
for num in 0 .. desc.num_sets() {
layouts.push(match desc.provided_set_layout(num) {
Some(l) => {
assert_eq!(l.device().internal_object(), device.internal_object());
assert_eq!(l.device().internal_object(),
device.internal_object());
l
},
None => {
let sets_iter = 0 .. desc.num_bindings_in_set(num).unwrap_or(0);
let sets_iter = 0 ..
desc.num_bindings_in_set(num).unwrap_or(0);
let desc_iter = sets_iter.map(|d| desc.descriptor(num, d));
Arc::new(try!(UnsafeDescriptorSetLayout::new(device.clone(), desc_iter)))
Arc::new(UnsafeDescriptorSetLayout::new(device.clone(),
desc_iter)?)
},
});
}
@ -73,9 +77,10 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
};

// Grab the list of `vkDescriptorSetLayout` objects from `layouts`.
let layouts_ids = layouts.iter().map(|l| {
l.internal_object()
}).collect::<SmallVec<[_; 16]>>();
let layouts_ids = layouts
.iter()
.map(|l| l.internal_object())
.collect::<SmallVec<[_; 16]>>();

// FIXME: must also check per-descriptor-type limits (eg. max uniform buffer descriptors)

@ -88,7 +93,11 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
let mut out: SmallVec<[_; 8]> = SmallVec::new();

for pc_id in 0 .. desc.num_push_constants_ranges() {
let PipelineLayoutDescPcRange { offset, size, stages } = {
let PipelineLayoutDescPcRange {
offset,
size,
stages,
} = {
match desc.push_constants_range(pc_id) {
Some(o) => o,
None => continue,
@ -145,8 +154,10 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
};

let mut output = mem::uninitialized();
try!(check_errors(vk.CreatePipelineLayout(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreatePipelineLayout(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -159,7 +170,9 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
}
}

impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
impl<L> PipelineLayout<L>
where L: PipelineLayoutDesc
{
/// Returns the description of the pipeline layout.
#[inline]
pub fn desc(&self) -> &L {
@ -167,7 +180,9 @@ impl<L> PipelineLayout<L> where L: PipelineLayoutDesc {
}
}

unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D> where D: PipelineLayoutDescNames {
unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D>
where D: PipelineLayoutDescNames
{
#[inline]
fn sys(&self) -> PipelineLayoutSys {
PipelineLayoutSys(&self.layout)
@ -179,7 +194,9 @@ unsafe impl<D> PipelineLayoutAbstract for PipelineLayout<D> where D: PipelineLay
}
}

unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D> where D: PipelineLayoutDesc {
unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D>
where D: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
self.desc.num_sets()
@ -206,7 +223,9 @@ unsafe impl<D> PipelineLayoutDesc for PipelineLayout<D> where D: PipelineLayoutD
}
}

unsafe impl<D> PipelineLayoutDescNames for PipelineLayout<D> where D: PipelineLayoutDescNames {
unsafe impl<D> PipelineLayoutDescNames for PipelineLayout<D>
where D: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
self.desc.descriptor_by_name(name)
@ -220,7 +239,9 @@ unsafe impl<D> DeviceOwned for PipelineLayout<D> {
}
}

impl<D> fmt::Debug for PipelineLayout<D> where D: fmt::Debug {
impl<D> fmt::Debug for PipelineLayout<D>
where D: fmt::Debug
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.debug_struct("PipelineLayout")
.field("raw", &self.layout)
@ -293,7 +314,7 @@ impl error::Error for PipelineLayoutCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
PipelineLayoutCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -322,7 +343,7 @@ impl From<Error> for PipelineLayoutCreationError {
err @ Error::OutOfDeviceMemory => {
PipelineLayoutCreationError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

@ -7,22 +7,22 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use std::cmp;
use std::error;
use std::fmt;
use std::cmp;
use std::sync::Arc;

use SafeDeref;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor::ShaderStages;
use descriptor::descriptor_set::DescriptorSetsCollection;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
use descriptor::pipeline_layout::PipelineLayoutSys;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use device::Device;
use device::DeviceOwned;
use SafeDeref;

/// Trait for objects that describe the layout of the descriptors and push constants of a pipeline.
// TODO: meh for PipelineLayoutDescNames ; the `Names` thing shouldn't be mandatory
@ -40,7 +40,10 @@ pub unsafe trait PipelineLayoutAbstract: PipelineLayoutDescNames + DeviceOwned {
fn descriptor_set_layout(&self, index: usize) -> Option<&Arc<UnsafeDescriptorSetLayout>>;
}

unsafe impl<T> PipelineLayoutAbstract for T where T: SafeDeref, T::Target: PipelineLayoutAbstract {
unsafe impl<T> PipelineLayoutAbstract for T
where T: SafeDeref,
T::Target: PipelineLayoutAbstract
{
#[inline]
fn sys(&self) -> PipelineLayoutSys {
(**self).sys()
@ -91,7 +94,9 @@ pub unsafe trait PipelineLayoutDesc {

/// Builds the union of this layout and another.
#[inline]
fn union<T>(self, other: T) -> PipelineLayoutDescUnion<Self, T> where Self: Sized {
fn union<T>(self, other: T) -> PipelineLayoutDescUnion<Self, T>
where Self: Sized
{
PipelineLayoutDescUnion::new(self, other)
}

@ -99,8 +104,7 @@ pub unsafe trait PipelineLayoutDesc {
///
/// > **Note**: This is just a shortcut for `PipelineLayout::new`.
#[inline]
fn build(self, device: Arc<Device>)
-> Result<PipelineLayout<Self>, PipelineLayoutCreationError>
fn build(self, device: Arc<Device>) -> Result<PipelineLayout<Self>, PipelineLayoutCreationError>
where Self: Sized
{
PipelineLayout::new(device, self)
@ -120,7 +124,10 @@ pub struct PipelineLayoutDescPcRange {
pub stages: ShaderStages,
}

unsafe impl<T> PipelineLayoutDesc for T where T: SafeDeref, T::Target: PipelineLayoutDesc {
unsafe impl<T> PipelineLayoutDesc for T
where T: SafeDeref,
T::Target: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
(**self).num_sets()
@ -155,7 +162,10 @@ pub unsafe trait PipelineLayoutDescNames: PipelineLayoutDesc {
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)>;
}

unsafe impl<T> PipelineLayoutDescNames for T where T: SafeDeref, T::Target: PipelineLayoutDescNames {
unsafe impl<T> PipelineLayoutDescNames for T
where T: SafeDeref,
T::Target: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
(**self).descriptor_by_name(name)
@ -174,7 +184,8 @@ pub unsafe trait PipelineLayoutSuperset<Other: ?Sized>: PipelineLayoutDesc
}

unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSuperset<U> for T
where T: PipelineLayoutDesc, U: PipelineLayoutDesc
where T: PipelineLayoutDesc,
U: PipelineLayoutDesc
{
fn ensure_superset_of(&self, other: &U) -> Result<(), PipelineLayoutNotSupersetError> {
for set_num in 0 .. cmp::max(self.num_sets(), other.num_sets()) {
@ -200,11 +211,12 @@ unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSuperset<U> for T
});
}
},
(None, Some(_)) => return Err(PipelineLayoutNotSupersetError::ExpectedEmptyDescriptor {
(None, Some(_)) =>
return Err(PipelineLayoutNotSupersetError::ExpectedEmptyDescriptor {
set_num: set_num as u32,
descriptor: desc_num as u32,
}),
_ => ()
_ => (),
}
}
}
@ -222,14 +234,11 @@ pub enum PipelineLayoutNotSupersetError {
DescriptorsCountMismatch {
set_num: u32,
self_num_descriptors: u32,
other_num_descriptors: u32
other_num_descriptors: u32,
},

/// Expected an empty descriptor, but got something instead.
ExpectedEmptyDescriptor {
set_num: u32,
descriptor: u32,
},
ExpectedEmptyDescriptor { set_num: u32, descriptor: u32 },

/// Two descriptors are incompatible.
IncompatibleDescriptors {
@ -272,7 +281,8 @@ pub unsafe trait PipelineLayoutSetsCompatible<Other: ?Sized>: PipelineLayoutDesc
}

unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSetsCompatible<U> for T
where T: PipelineLayoutDesc, U: DescriptorSetsCollection
where T: PipelineLayoutDesc,
U: DescriptorSetsCollection
{
fn is_compatible(&self, sets: &U) -> bool {
/*let mut other_descriptor_sets = DescriptorSetsCollection::description(sets);
@ -302,7 +312,8 @@ unsafe impl<T: ?Sized, U: ?Sized> PipelineLayoutSetsCompatible<U> for T

/// Traits that allow determining whether
// TODO: require a trait on Pc
pub unsafe trait PipelineLayoutPushConstantsCompatible<Pc: ?Sized>: PipelineLayoutDesc {
pub unsafe trait PipelineLayoutPushConstantsCompatible<Pc: ?Sized>
: PipelineLayoutDesc {
/// Returns true if `Pc` can be used with a pipeline that uses `self` as layout.
fn is_compatible(&self, &Pc) -> bool;
}

@ -7,13 +7,13 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use std::cmp;
use std::sync::Arc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use std::cmp;
use std::sync::Arc;

/// Contains the union of two pipeline layout description.
///
@ -32,7 +32,8 @@ impl<A, B> PipelineLayoutDescUnion<A, B> {
}

unsafe impl<A, B> PipelineLayoutDesc for PipelineLayoutDescUnion<A, B>
where A: PipelineLayoutDesc, B: PipelineLayoutDesc
where A: PipelineLayoutDesc,
B: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
@ -67,16 +68,19 @@ unsafe impl<A, B> PipelineLayoutDesc for PipelineLayoutDescUnion<A, B>

#[inline]
fn provided_set_layout(&self, set: usize) -> Option<Arc<UnsafeDescriptorSetLayout>> {
self.a.provided_set_layout(set).or(self.b.provided_set_layout(set))
self.a
.provided_set_layout(set)
.or(self.b.provided_set_layout(set))
}

#[inline]
fn num_push_constants_ranges(&self) -> usize {
// We simply call `push_constants_range` repeatidely to determine when it is over.
// TODO: consider caching this
(self.a.num_push_constants_ranges() ..).filter(|&n| {
self.push_constants_range(n).is_none()
}).next().unwrap()
(self.a.num_push_constants_ranges() ..)
.filter(|&n| self.push_constants_range(n).is_none())
.next()
.unwrap()
}

// TODO: needs tests
@ -132,7 +136,8 @@ unsafe impl<A, B> PipelineLayoutDesc for PipelineLayoutDescUnion<A, B>
}

unsafe impl<A, B> PipelineLayoutDescNames for PipelineLayoutDescUnion<A, B>
where A: PipelineLayoutDescNames, B: PipelineLayoutDescNames
where A: PipelineLayoutDescNames,
B: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
@ -143,7 +148,10 @@ unsafe impl<A, B> PipelineLayoutDescNames for PipelineLayoutDescUnion<A, B>
(None, None) => None,
(Some(r), None) => Some(r),
(None, Some(r)) => Some(r),
(Some(a), Some(b)) => { assert_eq!(a, b); Some(a) }
(Some(a), Some(b)) => {
assert_eq!(a, b);
Some(a)
},
}
}
}

@ -89,10 +89,12 @@
//!
//! TODO: write

use fnv::FnvHasher;
use smallvec::SmallVec;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fmt;
use std::error;
use std::fmt;
use std::hash::BuildHasherDefault;
use std::mem;
use std::ops::Deref;
@ -101,8 +103,6 @@ use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;
use std::sync::Weak;
use smallvec::SmallVec;
use fnv::FnvHasher;

use command_buffer::pool::StandardCommandPool;
use descriptor::descriptor_set::StdDescriptorPool;
@ -129,15 +129,18 @@ pub struct Device {
vk: vk::DevicePointers,
standard_pool: Mutex<Weak<StdMemoryPool>>,
standard_descriptor_pool: Mutex<Weak<StdDescriptorPool>>,
standard_command_pools: Mutex<HashMap<u32, Weak<StandardCommandPool>, BuildHasherDefault<FnvHasher>>>,
standard_command_pools:
Mutex<HashMap<u32, Weak<StandardCommandPool>, BuildHasherDefault<FnvHasher>>>,
features: Features,
extensions: DeviceExtensions,
}

// The `StandardCommandPool` type doesn't implement Send/Sync, so we have to manually reimplement
// them for the device itself.
unsafe impl Send for Device {}
unsafe impl Sync for Device {}
unsafe impl Send for Device {
}
unsafe impl Sync for Device {
}

impl Device {
/// Builds a new Vulkan device for the given physical device.
@ -164,7 +167,7 @@ impl Device {
extensions: Ext, queue_families: I)
-> Result<(Arc<Device>, QueuesIter), DeviceCreationError>
where I: IntoIterator<Item = (QueueFamily<'a>, f32)>,
Ext: Into<RawDeviceExtensions>,
Ext: Into<RawDeviceExtensions>
{
let queue_families = queue_families.into_iter();

@ -186,14 +189,16 @@ impl Device {
// Because there's no way to query the list of layers enabled for an instance, we need
// to save it alongside the instance. (`vkEnumerateDeviceLayerProperties` should get
// the right list post-1.0.13, but not pre-1.0.13, so we can't use it here.)
let layers_ptr = phys.instance().loaded_layers().map(|layer| {
layer.as_ptr()
}).collect::<SmallVec<[_; 16]>>();
let layers_ptr = phys.instance()
.loaded_layers()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 16]>>();

let extensions = extensions.into();
let extensions_list = extensions.iter().map(|extension| {
extension.as_ptr()
}).collect::<SmallVec<[_; 16]>>();
let extensions_list = extensions
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 16]>>();

// device creation
let device = unsafe {
@ -223,16 +228,19 @@ impl Device {
}

// turning `queues` into an array of `vkDeviceQueueCreateInfo` suitable for Vulkan
let queues = queues.iter().map(|&(queue_id, ref priorities)| {
let queues = queues
.iter()
.map(|&(queue_id, ref priorities)| {
vk::DeviceQueueCreateInfo {
sType: vk::STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
queueFamilyIndex: queue_id,
queueCount: priorities.len() as u32,
pQueuePriorities: priorities.as_ptr()
pQueuePriorities: priorities.as_ptr(),
}
}).collect::<SmallVec<[_; 16]>>();
})
.collect::<SmallVec<[_; 16]>>();

// TODO: The plan regarding `robustBufferAccess` is to check the shaders' code to see
// if they can possibly perform out-of-bounds reads and writes. If the user tries
@ -267,14 +275,17 @@ impl Device {
};

let mut output = mem::uninitialized();
try!(check_errors(vk_i.CreateDevice(phys.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk_i.CreateDevice(phys.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

// loading the function pointers of the newly-created device
let vk = vk::DevicePointers::load(|name| {
unsafe { vk_i.GetDeviceProcAddr(device, name.as_ptr()) as *const _ }
let vk = vk::DevicePointers::load(|name| unsafe {
vk_i.GetDeviceProcAddr(device, name.as_ptr()) as
*const _
});

let device = Arc::new(Device {
@ -317,7 +328,7 @@ impl Device {
/// while this function is waiting.
///
pub unsafe fn wait(&self) -> Result<(), OomError> {
try!(check_errors(self.vk.DeviceWaitIdle(self.device)));
check_errors(self.vk.DeviceWaitIdle(self.device))?;
Ok(())
}

@ -397,7 +408,7 @@ impl Device {
let new_pool = Arc::new(StandardCommandPool::new(me.clone(), queue));
entry.insert(Arc::downgrade(&new_pool));
new_pool
}
},
}
}
}
@ -439,7 +450,10 @@ pub unsafe trait DeviceOwned {
fn device(&self) -> &Arc<Device>;
}

unsafe impl<T> DeviceOwned for T where T: Deref, T::Target: DeviceOwned {
unsafe impl<T> DeviceOwned for T
where T: Deref,
T::Target: DeviceOwned
{
#[inline]
fn device(&self) -> &Arc<Device> {
(**self).device()
@ -460,13 +474,15 @@ impl Iterator for QueuesIter {
unsafe {
let &(family, id) = match self.families_and_ids.get(self.next_queue) {
Some(a) => a,
None => return None
None => return None,
};

self.next_queue += 1;

let mut output = mem::uninitialized();
self.device.vk.GetDeviceQueue(self.device.device, family, id, &mut output);
self.device
.vk
.GetDeviceQueue(self.device.device, family, id, &mut output);

Some(Arc::new(Queue {
queue: Mutex::new(output),
@ -484,7 +500,8 @@ impl Iterator for QueuesIter {
}
}

impl ExactSizeIterator for QueuesIter {}
impl ExactSizeIterator for QueuesIter {
}

/// Error that can be returned when creating a device.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@ -562,7 +579,7 @@ impl From<Error> for DeviceCreationError {
Error::ExtensionNotPresent => DeviceCreationError::ExtensionNotPresent,
Error::FeatureNotPresent => DeviceCreationError::FeatureNotPresent,
Error::TooManyObjects => DeviceCreationError::TooManyObjects,
_ => panic!("Unexpected error value: {}", err as i32)
_ => panic!("Unexpected error value: {}", err as i32),
}
}
}
@ -587,15 +604,17 @@ impl Queue {
/// Returns true if this is the same queue as another one.
#[inline]
pub fn is_same(&self, other: &Queue) -> bool {
self.id == other.id &&
self.family == other.family &&
self.id == other.id && self.family == other.family &&
self.device.internal_object() == other.device.internal_object()
}

/// Returns the family this queue belongs to.
#[inline]
pub fn family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.family).unwrap()
self.device
.physical_device()
.queue_family_by_id(self.family)
.unwrap()
}

/// Returns the index of this queue within its family.
@ -612,7 +631,7 @@ impl Queue {
unsafe {
let vk = self.device.pointers();
let queue = self.queue.lock().unwrap();
try!(check_errors(vk.QueueWaitIdle(*queue)));
check_errors(vk.QueueWaitIdle(*queue))?;
Ok(())
}
}
@ -629,12 +648,12 @@ unsafe impl SynchronizedVulkanObject for Queue {

#[cfg(test)]
mod tests {
use std::sync::Arc;
use device::Device;
use device::DeviceCreationError;
use device::DeviceExtensions;
use features::Features;
use instance;
use std::sync::Arc;

#[test]
fn one_ref() {
@ -647,15 +666,18 @@ mod tests {
let instance = instance!();
let physical = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};

let family = physical.queue_families().next().unwrap();
let queues = (0 .. family.queues_count() + 1).map(|_| (family, 1.0));

match Device::new(&physical, &Features::none(), &DeviceExtensions::none(), queues) {
match Device::new(&physical,
&Features::none(),
&DeviceExtensions::none(),
queues) {
Err(DeviceCreationError::TooManyQueuesForFamily) => return, // Success
_ => panic!()
_ => panic!(),
};
}

@ -664,7 +686,7 @@ mod tests {
let instance = instance!();
let physical = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};

let family = physical.queue_families().next().unwrap();
@ -675,9 +697,12 @@ mod tests {
return;
}

match Device::new(&physical, &features, &DeviceExtensions::none(), Some((family, 1.0))) {
match Device::new(&physical,
&features,
&DeviceExtensions::none(),
Some((family, 1.0))) {
Err(DeviceCreationError::FeatureNotPresent) => return, // Success
_ => panic!()
_ => panic!(),
};
}

@ -686,23 +711,25 @@ mod tests {
let instance = instance!();
let physical = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};

let family = physical.queue_families().next().unwrap();

match Device::new(&physical, &Features::none(),
&DeviceExtensions::none(), Some((family, 1.4)))
{
match Device::new(&physical,
&Features::none(),
&DeviceExtensions::none(),
Some((family, 1.4))) {
Err(DeviceCreationError::PriorityOutOfRange) => (), // Success
_ => panic!()
_ => panic!(),
};

match Device::new(&physical, &Features::none(),
&DeviceExtensions::none(), Some((family, -0.2)))
{
match Device::new(&physical,
&Features::none(),
&DeviceExtensions::none(),
Some((family, -0.2))) {
Err(DeviceCreationError::PriorityOutOfRange) => (), // Success
_ => panic!()
_ => panic!(),
};
}
}

@ -101,6 +101,7 @@
//!
//! // TODO: storage formats
//!

use std::vec::IntoIter as VecIntoIter;
use vk;

@ -120,11 +121,15 @@ pub unsafe trait Data {
// TODO: that's just an example ; implement for all common data types
unsafe impl Data for i8 {
#[inline]
fn ty() -> Format { Format::R8Sint }
fn ty() -> Format {
Format::R8Sint
}
}
unsafe impl Data for u8 {
#[inline]
fn ty() -> Format { Format::R8Uint }
fn ty() -> Format {
Format::R8Uint
}
}

macro_rules! formats {
@ -572,7 +577,7 @@ unsafe impl FormatDesc for Format {
(FormatTy::Depth, f @ ClearValue::Depth(_)) => f,
(FormatTy::Stencil, f @ ClearValue::Stencil(_)) => f,
(FormatTy::DepthStencil, f @ ClearValue::DepthStencil(_)) => f,
_ => panic!("Wrong clear value")
_ => panic!("Wrong clear value"),
}
}
}
@ -585,7 +590,9 @@ pub unsafe trait PossibleFloatFormatDesc: FormatDesc {

unsafe impl PossibleFloatFormatDesc for Format {
#[inline]
fn is_float(&self) -> bool { self.ty() == FormatTy::Float }
fn is_float(&self) -> bool {
self.ty() == FormatTy::Float
}
}

pub unsafe trait PossibleUintFormatDesc: FormatDesc {
@ -594,7 +601,9 @@ pub unsafe trait PossibleUintFormatDesc: FormatDesc {

unsafe impl PossibleUintFormatDesc for Format {
#[inline]
fn is_uint(&self) -> bool { self.ty() == FormatTy::Uint }
fn is_uint(&self) -> bool {
self.ty() == FormatTy::Uint
}
}

pub unsafe trait PossibleSintFormatDesc: FormatDesc {
@ -603,7 +612,9 @@ pub unsafe trait PossibleSintFormatDesc: FormatDesc {

unsafe impl PossibleSintFormatDesc for Format {
#[inline]
fn is_sint(&self) -> bool { self.ty() == FormatTy::Sint }
fn is_sint(&self) -> bool {
self.ty() == FormatTy::Sint
}
}

pub unsafe trait PossibleDepthFormatDesc: FormatDesc {
@ -612,7 +623,9 @@ pub unsafe trait PossibleDepthFormatDesc: FormatDesc {

unsafe impl PossibleDepthFormatDesc for Format {
#[inline]
fn is_depth(&self) -> bool { self.ty() == FormatTy::Depth }
fn is_depth(&self) -> bool {
self.ty() == FormatTy::Depth
}
}

pub unsafe trait PossibleStencilFormatDesc: FormatDesc {
@ -621,7 +634,9 @@ pub unsafe trait PossibleStencilFormatDesc: FormatDesc {

unsafe impl PossibleStencilFormatDesc for Format {
#[inline]
fn is_stencil(&self) -> bool { self.ty() == FormatTy::Stencil }
fn is_stencil(&self) -> bool {
self.ty() == FormatTy::Stencil
}
}

pub unsafe trait PossibleDepthStencilFormatDesc: FormatDesc {
@ -630,7 +645,9 @@ pub unsafe trait PossibleDepthStencilFormatDesc: FormatDesc {

unsafe impl PossibleDepthStencilFormatDesc for Format {
#[inline]
fn is_depth_stencil(&self) -> bool { self.ty() == FormatTy::DepthStencil }
fn is_depth_stencil(&self) -> bool {
self.ty() == FormatTy::DepthStencil
}
}

pub unsafe trait PossibleCompressedFormatDesc: FormatDesc {
@ -639,7 +656,9 @@ pub unsafe trait PossibleCompressedFormatDesc: FormatDesc {

unsafe impl PossibleCompressedFormatDesc for Format {
#[inline]
fn is_compressed(&self) -> bool { self.ty() == FormatTy::Compressed }
fn is_compressed(&self) -> bool {
self.ty() == FormatTy::Compressed
}
}

/// Trait for types that can possibly describe a float or compressed attachment.

@ -7,9 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use std::sync::Arc;
use SafeDeref;
use image::ImageViewAccess;
use std::sync::Arc;
//use sync::AccessFlagBits;
//use sync::PipelineStages;

@ -20,7 +20,10 @@ pub unsafe trait AttachmentsList {
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess>;
}

unsafe impl<T> AttachmentsList for T where T: SafeDeref, T::Target: AttachmentsList {
unsafe impl<T> AttachmentsList for T
where T: SafeDeref,
T::Target: AttachmentsList
{
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {
(**self).as_image_view_accesses()
@ -42,7 +45,8 @@ unsafe impl AttachmentsList for Vec<Arc<ImageViewAccess + Send + Sync>> {
}

unsafe impl<A, B> AttachmentsList for (A, B)
where A: AttachmentsList, B: ImageViewAccess
where A: AttachmentsList,
B: ImageViewAccess
{
#[inline]
fn as_image_view_accesses(&self) -> Vec<&ImageViewAccess> {

@ -10,11 +10,11 @@
|
||||
//! This module contains the `ensure_image_view_compatible` function, which verifies whether
|
||||
//! an image view can be used as a render pass attachment.
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use format::Format;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use image::ImageViewAccess;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
|
||||
/// Checks whether the given image view is allowed to be the nth attachment of the given render
|
||||
/// pass.
|
||||
@ -29,7 +29,8 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
|
||||
where Rp: ?Sized + RenderPassDesc,
|
||||
I: ?Sized + ImageViewAccess
|
||||
{
|
||||
let attachment_desc = render_pass.attachment_desc(attachment_num)
|
||||
let attachment_desc = render_pass
|
||||
.attachment_desc(attachment_num)
|
||||
.expect("Attachment num out of range");
|
||||
|
||||
if image.format() != attachment_desc.format {
|
||||
@ -51,10 +52,15 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
|
||||
}
|
||||
|
||||
for subpass_num in 0 .. render_pass.num_subpasses() {
|
||||
let subpass = render_pass.subpass_desc(subpass_num).expect("Subpass num out of range ; \
|
||||
wrong RenderPassDesc trait impl");
|
||||
let subpass = render_pass
|
||||
.subpass_desc(subpass_num)
|
||||
.expect("Subpass num out of range ; wrong RenderPassDesc trait impl");
|
||||
|
||||
if subpass.color_attachments.iter().any(|&(n, _)| n == attachment_num) {
|
||||
if subpass
|
||||
.color_attachments
|
||||
.iter()
|
||||
.any(|&(n, _)| n == attachment_num)
|
||||
{
|
||||
debug_assert!(image.parent().has_color()); // Was normally checked by the render pass.
|
||||
if !image.parent().inner().usage_color_attachment() {
|
||||
return Err(IncompatibleRenderPassAttachmentError::MissingColorAttachmentUsage);
|
||||
@ -71,7 +77,11 @@ pub fn ensure_image_view_compatible<Rp, I>(render_pass: &Rp, attachment_num: usi
|
||||
}
|
||||
}
|
||||
|
||||
if subpass.input_attachments.iter().any(|&(n, _)| n == attachment_num) {
|
||||
if subpass
|
||||
.input_attachments
|
||||
.iter()
|
||||
.any(|&(n, _)| n == attachment_num)
|
||||
{
|
||||
if !image.parent().inner().usage_input_attachment() {
|
||||
return Err(IncompatibleRenderPassAttachmentError::MissingInputAttachmentUsage);
|
||||
}
|
||||
@ -158,11 +168,11 @@ impl fmt::Display for IncompatibleRenderPassAttachmentError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::IncompatibleRenderPassAttachmentError;
|
||||
use super::ensure_image_view_compatible;
|
||||
use format::Format;
|
||||
use framebuffer::EmptySinglePassRenderPassDesc;
|
||||
use image::AttachmentImage;
|
||||
use super::ensure_image_view_compatible;
|
||||
use super::IncompatibleRenderPassAttachmentError;
|
||||
|
||||
#[test]
|
||||
fn basic_ok() {
|
||||
@ -211,8 +221,10 @@ mod tests {
|
||||
|
||||
match ensure_image_view_compatible(&rp, 0, &img) {
|
||||
Err(IncompatibleRenderPassAttachmentError::FormatMismatch {
|
||||
expected: Format::R16G16Sfloat, obtained: Format::R8G8B8A8Unorm }) => (),
|
||||
e => panic!("{:?}", e)
|
||||
expected: Format::R16G16Sfloat,
|
||||
obtained: Format::R8G8B8A8Unorm,
|
||||
}) => (),
|
||||
e => panic!("{:?}", e),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -14,10 +14,10 @@ use format::ClearValue;
|
||||
use format::Format;
|
||||
use format::FormatTy;
|
||||
use framebuffer::RenderPass;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::RenderPassCompatible;
|
||||
use framebuffer::RenderPassCreationError;
|
||||
use image::ImageLayout as ImageLayout;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use image::ImageLayout;
|
||||
use sync::AccessFlagBits;
|
||||
use sync::PipelineStages;
|
||||
|
||||
@ -53,8 +53,13 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
|
||||
/// Returns an iterator to the list of attachments.
|
||||
#[inline]
|
||||
fn attachment_descs(&self) -> RenderPassDescAttachments<Self> where Self: Sized {
|
||||
RenderPassDescAttachments { render_pass: self, num: 0 }
|
||||
fn attachment_descs(&self) -> RenderPassDescAttachments<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
RenderPassDescAttachments {
|
||||
render_pass: self,
|
||||
num: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of subpasses of the render pass.
|
||||
@ -67,8 +72,13 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
|
||||
/// Returns an iterator to the list of subpasses.
|
||||
#[inline]
|
||||
fn subpass_descs(&self) -> RenderPassDescSubpasses<Self> where Self: Sized {
|
||||
RenderPassDescSubpasses { render_pass: self, num: 0 }
|
||||
fn subpass_descs(&self) -> RenderPassDescSubpasses<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
RenderPassDescSubpasses {
|
||||
render_pass: self,
|
||||
num: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of dependencies of the render pass.
|
||||
@ -81,8 +91,13 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
|
||||
/// Returns an iterator to the list of dependencies.
|
||||
#[inline]
|
||||
fn dependency_descs(&self) -> RenderPassDescDependencies<Self> where Self: Sized {
|
||||
RenderPassDescDependencies { render_pass: self, num: 0 }
|
||||
fn dependency_descs(&self) -> RenderPassDescDependencies<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
RenderPassDescDependencies {
|
||||
render_pass: self,
|
||||
num: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this render pass is compatible with another render pass.
|
||||
@ -114,18 +129,30 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
/// Returns the number of color attachments of a subpass. Returns `None` if out of range.
|
||||
#[inline]
|
||||
fn num_color_attachments(&self, subpass: u32) -> Option<u32> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().map(|p| p.color_attachments.len() as u32)
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.map(|p| p.color_attachments.len() as u32)
|
||||
}
|
||||
|
||||
/// Returns the number of samples of the attachments of a subpass. Returns `None` if out of
|
||||
/// range or if the subpass has no attachment. TODO: return an enum instead?
|
||||
#[inline]
|
||||
fn num_samples(&self, subpass: u32) -> Option<u32> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().and_then(|p| {
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.and_then(|p| {
|
||||
// TODO: chain input attachments as well?
|
||||
p.color_attachments.iter().cloned().chain(p.depth_stencil.clone().into_iter())
|
||||
p.color_attachments
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(p.depth_stencil.clone().into_iter())
|
||||
.filter_map(|a| (&self).attachment_descs().skip(a.0).next())
|
||||
.next().map(|a| a.samples)
|
||||
.next()
|
||||
.map(|a| a.samples)
|
||||
})
|
||||
}
|
||||
|
||||
@ -133,17 +160,27 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
/// second element is `true` if there's a stencil attachment. Returns `None` if out of range.
|
||||
#[inline]
|
||||
fn has_depth_stencil_attachment(&self, subpass: u32) -> Option<(bool, bool)> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.map(|p| {
|
||||
let atch_num = match p.depth_stencil {
|
||||
Some((d, _)) => d,
|
||||
None => return (false, false)
|
||||
None => return (false, false),
|
||||
};
|
||||
|
||||
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
|
||||
match (&self)
|
||||
.attachment_descs()
|
||||
.skip(atch_num)
|
||||
.next()
|
||||
.unwrap()
|
||||
.format
|
||||
.ty() {
|
||||
FormatTy::Depth => (true, false),
|
||||
FormatTy::Stencil => (false, true),
|
||||
FormatTy::DepthStencil => (true, true),
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -151,17 +188,27 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
/// Returns true if a subpass has a depth attachment or a depth-stencil attachment.
|
||||
#[inline]
|
||||
fn has_depth(&self, subpass: u32) -> Option<bool> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.map(|p| {
|
||||
let atch_num = match p.depth_stencil {
|
||||
Some((d, _)) => d,
|
||||
None => return false
|
||||
None => return false,
|
||||
};
|
||||
|
||||
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
|
||||
match (&self)
|
||||
.attachment_descs()
|
||||
.skip(atch_num)
|
||||
.next()
|
||||
.unwrap()
|
||||
.format
|
||||
.ty() {
|
||||
FormatTy::Depth => true,
|
||||
FormatTy::Stencil => false,
|
||||
FormatTy::DepthStencil => true,
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -170,20 +217,32 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
/// layout is not `DepthStencilReadOnlyOptimal`.
|
||||
#[inline]
|
||||
fn has_writable_depth(&self, subpass: u32) -> Option<bool> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.map(|p| {
|
||||
let atch_num = match p.depth_stencil {
|
||||
Some((d, l)) => {
|
||||
if l == ImageLayout::DepthStencilReadOnlyOptimal { return false; }
|
||||
if l == ImageLayout::DepthStencilReadOnlyOptimal {
|
||||
return false;
|
||||
}
|
||||
d
|
||||
},
|
||||
None => return false
|
||||
None => return false,
|
||||
};
|
||||
|
||||
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
|
||||
match (&self)
|
||||
.attachment_descs()
|
||||
.skip(atch_num)
|
||||
.next()
|
||||
.unwrap()
|
||||
.format
|
||||
.ty() {
|
||||
FormatTy::Depth => true,
|
||||
FormatTy::Stencil => false,
|
||||
FormatTy::DepthStencil => true,
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -191,17 +250,27 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
/// Returns true if a subpass has a stencil attachment or a depth-stencil attachment.
|
||||
#[inline]
|
||||
fn has_stencil(&self, subpass: u32) -> Option<bool> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.map(|p| {
|
||||
let atch_num = match p.depth_stencil {
|
||||
Some((d, _)) => d,
|
||||
None => return false
|
||||
None => return false,
|
||||
};
|
||||
|
||||
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
|
||||
match (&self)
|
||||
.attachment_descs()
|
||||
.skip(atch_num)
|
||||
.next()
|
||||
.unwrap()
|
||||
.format
|
||||
.ty() {
|
||||
FormatTy::Depth => false,
|
||||
FormatTy::Stencil => true,
|
||||
FormatTy::DepthStencil => true,
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -210,26 +279,41 @@ pub unsafe trait RenderPassDesc: RenderPassDescClearValues<Vec<ClearValue>> {
|
||||
/// layout is not `DepthStencilReadOnlyOptimal`.
|
||||
#[inline]
|
||||
fn has_writable_stencil(&self, subpass: u32) -> Option<bool> {
|
||||
(&self).subpass_descs().skip(subpass as usize).next().map(|p| {
|
||||
(&self)
|
||||
.subpass_descs()
|
||||
.skip(subpass as usize)
|
||||
.next()
|
||||
.map(|p| {
|
||||
let atch_num = match p.depth_stencil {
|
||||
Some((d, l)) => {
|
||||
if l == ImageLayout::DepthStencilReadOnlyOptimal { return false; }
|
||||
if l == ImageLayout::DepthStencilReadOnlyOptimal {
|
||||
return false;
|
||||
}
|
||||
d
|
||||
},
|
||||
None => return false
|
||||
None => return false,
|
||||
};
|
||||
|
||||
match (&self).attachment_descs().skip(atch_num).next().unwrap().format.ty() {
|
||||
match (&self)
|
||||
.attachment_descs()
|
||||
.skip(atch_num)
|
||||
.next()
|
||||
.unwrap()
|
||||
.format
|
||||
.ty() {
|
||||
FormatTy::Depth => false,
|
||||
FormatTy::Stencil => true,
|
||||
FormatTy::DepthStencil => true,
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T> RenderPassDesc for T where T: SafeDeref, T::Target: RenderPassDesc {
|
||||
unsafe impl<T> RenderPassDesc for T
|
||||
where T: SafeDeref,
|
||||
T::Target: RenderPassDesc
|
||||
{
|
||||
#[inline]
|
||||
fn num_attachments(&self) -> usize {
|
||||
(**self).num_attachments()
|
||||
@ -268,14 +352,18 @@ pub struct RenderPassDescAttachments<'a, R: ?Sized + 'a> {
|
||||
num: usize,
|
||||
}
|
||||
|
||||
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescAttachments<'a, R> where R: RenderPassDesc {
|
||||
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescAttachments<'a, R>
|
||||
where R: RenderPassDesc
|
||||
{
|
||||
type Item = LayoutAttachmentDescription;
|
||||
|
||||
fn next(&mut self) -> Option<LayoutAttachmentDescription> {
|
||||
if self.num < self.render_pass.num_attachments() {
|
||||
let n = self.num;
|
||||
self.num += 1;
|
||||
Some(self.render_pass.attachment_desc(n).expect("Wrong RenderPassDesc implementation"))
|
||||
Some(self.render_pass
|
||||
.attachment_desc(n)
|
||||
.expect("Wrong RenderPassDesc implementation"))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -289,14 +377,18 @@ pub struct RenderPassDescSubpasses<'a, R: ?Sized + 'a> {
|
||||
num: usize,
|
||||
}
|
||||
|
||||
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescSubpasses<'a, R> where R: RenderPassDesc {
|
||||
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescSubpasses<'a, R>
|
||||
where R: RenderPassDesc
|
||||
{
|
||||
type Item = LayoutPassDescription;
|
||||
|
||||
fn next(&mut self) -> Option<LayoutPassDescription> {
|
||||
if self.num < self.render_pass.num_subpasses() {
|
||||
let n = self.num;
|
||||
self.num += 1;
|
||||
Some(self.render_pass.subpass_desc(n).expect("Wrong RenderPassDesc implementation"))
|
||||
Some(self.render_pass
|
||||
.subpass_desc(n)
|
||||
.expect("Wrong RenderPassDesc implementation"))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -310,14 +402,18 @@ pub struct RenderPassDescDependencies<'a, R: ?Sized + 'a> {
|
||||
num: usize,
|
||||
}
|
||||
|
||||
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescDependencies<'a, R> where R: RenderPassDesc {
|
||||
impl<'a, R: ?Sized + 'a> Iterator for RenderPassDescDependencies<'a, R>
|
||||
where R: RenderPassDesc
|
||||
{
|
||||
type Item = LayoutPassDependencyDescription;
|
||||
|
||||
fn next(&mut self) -> Option<LayoutPassDependencyDescription> {
|
||||
if self.num < self.render_pass.num_dependencies() {
|
||||
let n = self.num;
|
||||
self.num += 1;
|
||||
Some(self.render_pass.dependency_desc(n).expect("Wrong RenderPassDesc implementation"))
|
||||
Some(self.render_pass
|
||||
.dependency_desc(n)
|
||||
.expect("Wrong RenderPassDesc implementation"))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
|
@ -7,13 +7,13 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use std::iter;
|
||||
use format::ClearValue;
|
||||
use framebuffer::LayoutAttachmentDescription;
|
||||
use framebuffer::LayoutPassDependencyDescription;
|
||||
use framebuffer::LayoutPassDescription;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::LayoutAttachmentDescription;
|
||||
use framebuffer::LayoutPassDescription;
|
||||
use framebuffer::LayoutPassDependencyDescription;
|
||||
use std::iter;
|
||||
|
||||
/// Description of an empty render pass.
|
||||
///
|
||||
@ -75,11 +75,7 @@ unsafe impl RenderPassDesc for EmptySinglePassRenderPassDesc {
|
||||
|
||||
#[inline]
|
||||
fn num_color_attachments(&self, subpass: u32) -> Option<u32> {
|
||||
if subpass == 0 {
|
||||
Some(0)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
if subpass == 0 { Some(0) } else { None }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@ -98,38 +94,22 @@ unsafe impl RenderPassDesc for EmptySinglePassRenderPassDesc {
|
||||
|
||||
#[inline]
|
||||
fn has_depth(&self, subpass: u32) -> Option<bool> {
|
||||
if subpass == 0 {
|
||||
Some(false)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
if subpass == 0 { Some(false) } else { None }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn has_writable_depth(&self, subpass: u32) -> Option<bool> {
|
||||
if subpass == 0 {
|
||||
Some(false)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
if subpass == 0 { Some(false) } else { None }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn has_stencil(&self, subpass: u32) -> Option<bool> {
|
||||
if subpass == 0 {
|
||||
Some(false)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
if subpass == 0 { Some(false) } else { None }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn has_writable_stencil(&self, subpass: u32) -> Option<bool> {
|
||||
if subpass == 0 {
|
||||
Some(false)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
if subpass == 0 { Some(false) } else { None }
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -7,6 +7,7 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::cmp;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
@ -14,7 +15,6 @@ use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
@ -26,8 +26,8 @@ use framebuffer::LayoutAttachmentDescription;
|
||||
use framebuffer::LayoutPassDependencyDescription;
|
||||
use framebuffer::LayoutPassDescription;
|
||||
use framebuffer::RenderPassAbstract;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::RenderPassSys;
|
||||
use framebuffer::ensure_image_view_compatible;
|
||||
use image::ImageViewAccess;
|
||||
@ -129,7 +129,10 @@ pub struct FramebufferBuilder<Rp, A> {
|
||||
attachments: A,
|
||||
}
|
||||
|
||||
impl<Rp, A> fmt::Debug for FramebufferBuilder<Rp, A> where Rp: fmt::Debug, A: fmt::Debug {
|
||||
impl<Rp, A> fmt::Debug for FramebufferBuilder<Rp, A>
|
||||
where Rp: fmt::Debug,
|
||||
A: fmt::Debug
|
||||
{
|
||||
#[inline]
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
fmt.debug_struct("FramebufferBuilder")
|
||||
@ -149,7 +152,7 @@ enum FramebufferBuilderDimensions {
|
||||
|
||||
impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
where Rp: RenderPassAbstract,
|
||||
A: AttachmentsList,
|
||||
A: AttachmentsList
|
||||
{
|
||||
/// Appends an attachment to the prototype of the framebuffer.
|
||||
///
|
||||
@ -167,7 +170,7 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
|
||||
match ensure_image_view_compatible(&self.render_pass, self.raw_ids.len(), &attachment) {
|
||||
Ok(()) => (),
|
||||
Err(err) => return Err(FramebufferCreationError::IncompatibleAttachment(err))
|
||||
Err(err) => return Err(FramebufferCreationError::IncompatibleAttachment(err)),
|
||||
};
|
||||
|
||||
let img_dims = attachment.dimensions();
|
||||
@ -184,12 +187,12 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
{
|
||||
return Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
|
||||
expected: current,
|
||||
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()]
|
||||
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()],
|
||||
});
|
||||
}
|
||||
|
||||
FramebufferBuilderDimensions::AutoIdentical(Some(current))
|
||||
}
|
||||
},
|
||||
FramebufferBuilderDimensions::AutoSmaller(None) => {
|
||||
let dims = [img_dims.width(), img_dims.height(), img_dims.array_layers()];
|
||||
FramebufferBuilderDimensions::AutoSmaller(Some(dims))
|
||||
@ -198,7 +201,7 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
let new_dims = [
|
||||
cmp::min(current[0], img_dims.width()),
|
||||
cmp::min(current[1], img_dims.height()),
|
||||
cmp::min(current[2], img_dims.array_layers())
|
||||
cmp::min(current[2], img_dims.array_layers()),
|
||||
];
|
||||
|
||||
FramebufferBuilderDimensions::AutoSmaller(Some(new_dims))
|
||||
@ -209,16 +212,14 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
{
|
||||
return Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
|
||||
expected: current,
|
||||
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()]
|
||||
obtained: [img_dims.width(), img_dims.height(), img_dims.array_layers()],
|
||||
});
|
||||
}
|
||||
|
||||
FramebufferBuilderDimensions::Specific([
|
||||
img_dims.width(),
|
||||
img_dims.height(),
|
||||
img_dims.array_layers()
|
||||
])
|
||||
}
|
||||
FramebufferBuilderDimensions::Specific(
|
||||
[img_dims.width(), img_dims.height(), img_dims.array_layers()],
|
||||
)
|
||||
},
|
||||
};
|
||||
|
||||
let mut raw_ids = self.raw_ids;
|
||||
@ -279,11 +280,12 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
// Checking the dimensions against the limits.
|
||||
{
|
||||
let limits = device.physical_device().limits();
|
||||
let limits = [limits.max_framebuffer_width(), limits.max_framebuffer_height(),
|
||||
limits.max_framebuffer_layers()];
|
||||
if dimensions[0] > limits[0] || dimensions[1] > limits[1] ||
|
||||
dimensions[2] > limits[2]
|
||||
{
|
||||
let limits = [
|
||||
limits.max_framebuffer_width(),
|
||||
limits.max_framebuffer_height(),
|
||||
limits.max_framebuffer_layers(),
|
||||
];
|
||||
if dimensions[0] > limits[0] || dimensions[1] > limits[1] || dimensions[2] > limits[2] {
|
||||
return Err(FramebufferCreationError::DimensionsTooLarge);
|
||||
}
|
||||
}
|
||||
@ -304,8 +306,10 @@ impl<Rp, A> FramebufferBuilder<Rp, A>
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateFramebuffer(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateFramebuffer(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -377,7 +381,9 @@ unsafe impl<Rp, A> FramebufferAbstract for Framebuffer<Rp, A>
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A> where Rp: RenderPassDesc {
|
||||
unsafe impl<Rp, A> RenderPassDesc for Framebuffer<Rp, A>
|
||||
where Rp: RenderPassDesc
|
||||
{
|
||||
#[inline]
|
||||
fn num_attachments(&self) -> usize {
|
||||
self.render_pass.num_attachments()
|
||||
@ -418,7 +424,9 @@ unsafe impl<C, Rp, A> RenderPassDescClearValues<C> for Framebuffer<Rp, A>
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp, A> where Rp: RenderPassAbstract {
|
||||
unsafe impl<Rp, A> RenderPassAbstract for Framebuffer<Rp, A>
|
||||
where Rp: RenderPassAbstract
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> RenderPassSys {
|
||||
self.render_pass.inner()
|
||||
@ -494,8 +502,8 @@ impl error::Error for FramebufferCreationError {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
FramebufferCreationError::OomError(_) => "no memory available",
|
||||
FramebufferCreationError::DimensionsTooLarge => "the dimensions of the framebuffer \
|
||||
are too large",
|
||||
FramebufferCreationError::DimensionsTooLarge =>
|
||||
"the dimensions of the framebuffer are too large",
|
||||
FramebufferCreationError::AttachmentDimensionsIncompatible { .. } => {
|
||||
"the attachment has a size that isn't compatible with the framebuffer dimensions"
|
||||
},
|
||||
@ -537,19 +545,20 @@ impl From<Error> for FramebufferCreationError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
use format::Format;
|
||||
use framebuffer::EmptySinglePassRenderPassDesc;
|
||||
use framebuffer::Framebuffer;
|
||||
use framebuffer::FramebufferCreationError;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use image::attachment::AttachmentImage;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[test]
|
||||
fn simple_create() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
color: {
|
||||
load: Clear,
|
||||
@ -562,22 +571,29 @@ mod tests {
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let image = AttachmentImage::new(device.clone(), [1024, 768],
|
||||
Format::R8G8B8A8Unorm).unwrap();
|
||||
let _ = Framebuffer::start(render_pass).add(image.clone()).unwrap().build().unwrap();
|
||||
let image = AttachmentImage::new(device.clone(), [1024, 768], Format::R8G8B8A8Unorm)
|
||||
.unwrap();
|
||||
let _ = Framebuffer::start(render_pass)
|
||||
.add(image.clone())
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_device_limits() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
|
||||
let rp = EmptySinglePassRenderPassDesc
|
||||
.build_render_pass(device)
|
||||
.unwrap();
|
||||
let res = Framebuffer::with_dimensions(rp, [0xffffffff, 0xffffffff, 0xffffffff]).build();
|
||||
match res {
|
||||
Err(FramebufferCreationError::DimensionsTooLarge) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -585,7 +601,8 @@ mod tests {
|
||||
fn attachment_format_mismatch() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
color: {
|
||||
load: Clear,
|
||||
@ -598,14 +615,14 @@ mod tests {
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let image = AttachmentImage::new(device.clone(), [1024, 768],
|
||||
Format::R8Unorm).unwrap();
|
||||
let image = AttachmentImage::new(device.clone(), [1024, 768], Format::R8Unorm).unwrap();
|
||||
|
||||
match Framebuffer::start(render_pass).add(image.clone()) {
|
||||
Err(FramebufferCreationError::IncompatibleAttachment(_)) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -615,7 +632,8 @@ mod tests {
|
||||
fn attachment_dims_larger_than_specified_valid() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
color: {
|
||||
load: Clear,
|
||||
@ -628,20 +646,24 @@ mod tests {
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let img = AttachmentImage::new(device.clone(), [600, 600], Format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
let _ = Framebuffer::with_dimensions(render_pass, [512, 512, 1])
|
||||
.add(img).unwrap()
|
||||
.build().unwrap();
|
||||
.add(img)
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn attachment_dims_smaller_than_specified() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
color: {
|
||||
load: Clear,
|
||||
@ -654,16 +676,20 @@ mod tests {
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let img = AttachmentImage::new(device.clone(), [512, 700], Format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
match Framebuffer::with_dimensions(render_pass, [600, 600, 1]).add(img) {
|
||||
Err(FramebufferCreationError::AttachmentDimensionsIncompatible { expected, obtained }) => {
|
||||
Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
|
||||
expected,
|
||||
obtained,
|
||||
}) => {
|
||||
assert_eq!(expected, [600, 600, 1]);
|
||||
assert_eq!(obtained, [512, 700, 1]);
|
||||
},
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -671,7 +697,8 @@ mod tests {
|
||||
fn multi_attachments_dims_not_identical() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
a: {
|
||||
load: Clear,
|
||||
@ -690,17 +717,21 @@ mod tests {
|
||||
color: [a, b],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let a = AttachmentImage::new(device.clone(), [512, 512], Format::R8G8B8A8Unorm).unwrap();
|
||||
let b = AttachmentImage::new(device.clone(), [512, 513], Format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
match Framebuffer::start(render_pass).add(a).unwrap().add(b) {
|
||||
Err(FramebufferCreationError::AttachmentDimensionsIncompatible { expected, obtained }) => {
|
||||
Err(FramebufferCreationError::AttachmentDimensionsIncompatible {
|
||||
expected,
|
||||
obtained,
|
||||
}) => {
|
||||
assert_eq!(expected, [512, 512, 1]);
|
||||
assert_eq!(obtained, [512, 513, 1]);
|
||||
},
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -708,7 +739,8 @@ mod tests {
|
||||
fn multi_attachments_auto_smaller() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
a: {
|
||||
load: Clear,
|
||||
@ -727,19 +759,23 @@ mod tests {
|
||||
color: [a, b],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let a = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
|
||||
let b = AttachmentImage::new(device.clone(), [512, 128], Format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
let fb = Framebuffer::with_intersecting_dimensions(render_pass)
|
||||
.add(a).unwrap()
|
||||
.add(b).unwrap()
|
||||
.build().unwrap();
|
||||
.add(a)
|
||||
.unwrap()
|
||||
.add(b)
|
||||
.unwrap()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
match (fb.width(), fb.height(), fb.layers()) {
|
||||
(256, 128, 1) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -747,7 +783,8 @@ mod tests {
|
||||
fn not_enough_attachments() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
a: {
|
||||
load: Clear,
|
||||
@ -766,18 +803,22 @@ mod tests {
|
||||
color: [a, b],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let img = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
let res = Framebuffer::with_intersecting_dimensions(render_pass)
|
||||
.add(img).unwrap()
|
||||
.add(img)
|
||||
.unwrap()
|
||||
.build();
|
||||
|
||||
match res {
|
||||
Err(FramebufferCreationError::AttachmentsCountMismatch { expected: 2,
|
||||
obtained: 1 }) => (),
|
||||
_ => panic!()
|
||||
Err(FramebufferCreationError::AttachmentsCountMismatch {
|
||||
expected: 2,
|
||||
obtained: 1,
|
||||
}) => (),
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -785,7 +826,8 @@ mod tests {
|
||||
fn too_many_attachments() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
|
||||
let render_pass = Arc::new(
|
||||
single_pass_renderpass!(device.clone(),
|
||||
attachments: {
|
||||
a: {
|
||||
load: Clear,
|
||||
@ -798,19 +840,23 @@ mod tests {
|
||||
color: [a],
|
||||
depth_stencil: {}
|
||||
}
|
||||
).unwrap());
|
||||
).unwrap(),
|
||||
);
|
||||
|
||||
let a = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
|
||||
let b = AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8Unorm).unwrap();
|
||||
|
||||
let res = Framebuffer::with_intersecting_dimensions(render_pass)
|
||||
.add(a).unwrap()
|
||||
.add(a)
|
||||
.unwrap()
|
||||
.add(b);
|
||||
|
||||
match res {
|
||||
Err(FramebufferCreationError::AttachmentsCountMismatch { expected: 1,
|
||||
obtained: 2 }) => (),
|
||||
_ => panic!()
|
||||
Err(FramebufferCreationError::AttachmentsCountMismatch {
|
||||
expected: 1,
|
||||
obtained: 2,
|
||||
}) => (),
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -818,19 +864,25 @@ mod tests {
|
||||
fn empty_working() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
|
||||
let _ = Framebuffer::with_dimensions(rp, [512, 512, 1]).build().unwrap();
|
||||
let rp = EmptySinglePassRenderPassDesc
|
||||
.build_render_pass(device)
|
||||
.unwrap();
|
||||
let _ = Framebuffer::with_dimensions(rp, [512, 512, 1])
|
||||
.build()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cant_determine_dimensions_auto() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
|
||||
let rp = EmptySinglePassRenderPassDesc
|
||||
.build_render_pass(device)
|
||||
.unwrap();
|
||||
let res = Framebuffer::start(rp).build();
|
||||
match res {
|
||||
Err(FramebufferCreationError::CantDetermineDimensions) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -838,11 +890,13 @@ mod tests {
|
||||
fn cant_determine_dimensions_intersect() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
|
||||
let rp = EmptySinglePassRenderPassDesc.build_render_pass(device).unwrap();
|
||||
let rp = EmptySinglePassRenderPassDesc
|
||||
.build_render_pass(device)
|
||||
.unwrap();
|
||||
let res = Framebuffer::with_intersecting_dimensions(rp).build();
|
||||
match res {
|
||||
Err(FramebufferCreationError::CantDetermineDimensions) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -91,17 +91,17 @@
|
||||
//!
|
||||
|
||||
pub use self::attachments_list::AttachmentsList;
|
||||
pub use self::compat_atch::ensure_image_view_compatible;
|
||||
pub use self::compat_atch::IncompatibleRenderPassAttachmentError;
|
||||
pub use self::compat_atch::ensure_image_view_compatible;
|
||||
pub use self::desc::LayoutAttachmentDescription;
|
||||
pub use self::desc::LayoutPassDescription;
|
||||
pub use self::desc::LayoutPassDependencyDescription;
|
||||
pub use self::desc::LayoutPassDescription;
|
||||
pub use self::desc::LoadOp;
|
||||
pub use self::desc::RenderPassDesc;
|
||||
pub use self::desc::RenderPassDescAttachments;
|
||||
pub use self::desc::RenderPassDescSubpasses;
|
||||
pub use self::desc::RenderPassDescDependencies;
|
||||
pub use self::desc::RenderPassDescSubpasses;
|
||||
pub use self::desc::StoreOp;
|
||||
pub use self::desc::LoadOp;
|
||||
pub use self::empty::EmptySinglePassRenderPassDesc;
|
||||
pub use self::framebuffer::Framebuffer;
|
||||
pub use self::framebuffer::FramebufferBuilder;
|
||||
@ -111,9 +111,9 @@ pub use self::sys::RenderPass;
|
||||
pub use self::sys::RenderPassCreationError;
|
||||
pub use self::sys::RenderPassSys;
|
||||
pub use self::traits::FramebufferAbstract;
|
||||
pub use self::traits::RenderPassDescClearValues;
|
||||
pub use self::traits::RenderPassCompatible;
|
||||
pub use self::traits::RenderPassAbstract;
|
||||
pub use self::traits::RenderPassCompatible;
|
||||
pub use self::traits::RenderPassDescClearValues;
|
||||
pub use self::traits::RenderPassSubpassInterface;
|
||||
pub use self::traits::Subpass;
|
||||
|
||||
|
@ -7,6 +7,7 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
@ -14,7 +15,6 @@ use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
@ -24,9 +24,9 @@ use framebuffer::LayoutAttachmentDescription;
|
||||
use framebuffer::LayoutPassDependencyDescription;
|
||||
use framebuffer::LayoutPassDescription;
|
||||
use framebuffer::LoadOp;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use framebuffer::RenderPassAbstract;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
|
||||
use Error;
|
||||
use OomError;
|
||||
@ -52,7 +52,9 @@ pub struct RenderPass<D> {
|
||||
granularity: Mutex<Option<[u32; 2]>>,
|
||||
}
|
||||
|
||||
impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
impl<D> RenderPass<D>
|
||||
where D: RenderPassDesc
|
||||
{
|
||||
/// Builds a new render pass.
|
||||
///
|
||||
/// # Panic
|
||||
@ -62,28 +64,46 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
/// mode.
|
||||
///
|
||||
pub fn new(device: Arc<Device>, description: D)
|
||||
-> Result<RenderPass<D>, RenderPassCreationError>
|
||||
{
|
||||
-> Result<RenderPass<D>, RenderPassCreationError> {
|
||||
let vk = device.pointers();
|
||||
|
||||
// If the first use of an attachment in this render pass is as an input attachment, and
|
||||
// the attachment is not also used as a color or depth/stencil attachment in the same
|
||||
// subpass, then loadOp must not be VK_ATTACHMENT_LOAD_OP_CLEAR
|
||||
debug_assert!(description.attachment_descs().enumerate().all(|(atch_num, attachment)| {
|
||||
debug_assert!(description.attachment_descs().enumerate().all(|(atch_num,
|
||||
attachment)| {
|
||||
if attachment.load != LoadOp::Clear {
|
||||
return true;
|
||||
}
|
||||
|
||||
for p in description.subpass_descs() {
|
||||
if p.color_attachments.iter().find(|&&(a, _)| a == atch_num).is_some() { return true; }
|
||||
if let Some((a, _)) = p.depth_stencil { if a == atch_num { return true; } }
|
||||
if p.input_attachments.iter().find(|&&(a, _)| a == atch_num).is_some() { return false; }
|
||||
if p.color_attachments
|
||||
.iter()
|
||||
.find(|&&(a, _)| a == atch_num)
|
||||
.is_some()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
if let Some((a, _)) = p.depth_stencil {
|
||||
if a == atch_num {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if p.input_attachments
|
||||
.iter()
|
||||
.find(|&&(a, _)| a == atch_num)
|
||||
.is_some()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
}));
|
||||
|
||||
let attachments = description.attachment_descs().map(|attachment| {
|
||||
let attachments = description
|
||||
.attachment_descs()
|
||||
.map(|attachment| {
|
||||
debug_assert!(attachment.samples.is_power_of_two());
|
||||
|
||||
vk::AttachmentDescription {
|
||||
@ -97,7 +117,8 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
initialLayout: attachment.initial_layout as u32,
|
||||
finalLayout: attachment.final_layout as u32,
|
||||
}
|
||||
}).collect::<SmallVec<[_; 16]>>();
|
||||
})
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
|
||||
// We need to pass pointers to vkAttachmentReference structs when creating the render pass.
|
||||
// Therefore we need to allocate them in advance.
|
||||
@ -105,71 +126,102 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
// This block allocates, for each pass, in order, all color attachment references, then all
|
||||
// input attachment references, then all resolve attachment references, then the depth
|
||||
// stencil attachment reference.
|
||||
let attachment_references = description.subpass_descs().flat_map(|pass| {
|
||||
let attachment_references = description
|
||||
.subpass_descs()
|
||||
.flat_map(|pass| {
|
||||
// Performing some validation with debug asserts.
|
||||
debug_assert!(pass.resolve_attachments.is_empty() ||
|
||||
pass.resolve_attachments.len() == pass.color_attachments.len());
|
||||
debug_assert!(pass.resolve_attachments.iter().all(|a| {
|
||||
attachments[a.0].samples == 1
|
||||
}));
|
||||
debug_assert!(pass.resolve_attachments
|
||||
.iter()
|
||||
.all(|a| attachments[a.0].samples == 1));
|
||||
debug_assert!(pass.resolve_attachments.is_empty() ||
|
||||
pass.color_attachments.iter().all(|a| {
|
||||
attachments[a.0].samples > 1
|
||||
}));
|
||||
pass.color_attachments
|
||||
.iter()
|
||||
.all(|a| attachments[a.0].samples > 1));
|
||||
debug_assert!(pass.resolve_attachments.is_empty() ||
|
||||
pass.resolve_attachments.iter().zip(pass.color_attachments.iter())
|
||||
pass.resolve_attachments
|
||||
.iter()
|
||||
.zip(pass.color_attachments.iter())
|
||||
.all(|(r, c)| {
|
||||
attachments[r.0].format == attachments[c.0].format
|
||||
}));
|
||||
debug_assert!(pass.color_attachments.iter().cloned()
|
||||
debug_assert!(pass.color_attachments
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(pass.depth_stencil.clone().into_iter())
|
||||
.chain(pass.input_attachments.iter().cloned())
|
||||
.chain(pass.resolve_attachments.iter().cloned())
|
||||
.all(|(a, _)| {
|
||||
pass.preserve_attachments.iter().find(|&&b| a == b).is_none()
|
||||
pass.preserve_attachments
|
||||
.iter()
|
||||
.find(|&&b| a == b)
|
||||
.is_none()
|
||||
}));
|
||||
debug_assert!(pass.color_attachments.iter().cloned()
|
||||
debug_assert!(
|
||||
pass.color_attachments
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(pass.depth_stencil.clone().into_iter())
|
||||
.all(|(atch, layout)| {
|
||||
if let Some(r) = pass.input_attachments.iter()
|
||||
.find(|r| r.0 == atch)
|
||||
.all(|(atch, layout)| if let Some(r) =
|
||||
pass.input_attachments.iter().find(|r| r.0 == atch)
|
||||
{
|
||||
r.1 == layout
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}));
|
||||
})
|
||||
);
|
||||
|
||||
let resolve = pass.resolve_attachments.into_iter().map(|(offset, img_la)| {
|
||||
let resolve = pass.resolve_attachments
|
||||
.into_iter()
|
||||
.map(|(offset, img_la)| {
|
||||
debug_assert!(offset < attachments.len());
|
||||
vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, }
|
||||
vk::AttachmentReference {
|
||||
attachment: offset as u32,
|
||||
layout: img_la as u32,
|
||||
}
|
||||
});
|
||||
|
||||
let color = pass.color_attachments.into_iter().map(|(offset, img_la)| {
|
||||
debug_assert!(offset < attachments.len());
|
||||
vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, }
|
||||
vk::AttachmentReference {
|
||||
attachment: offset as u32,
|
||||
layout: img_la as u32,
|
||||
}
|
||||
});
|
||||
|
||||
let input = pass.input_attachments.into_iter().map(|(offset, img_la)| {
|
||||
debug_assert!(offset < attachments.len());
|
||||
vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, }
|
||||
vk::AttachmentReference {
|
||||
attachment: offset as u32,
|
||||
layout: img_la as u32,
|
||||
}
|
||||
});
|
||||
|
||||
let depthstencil = if let Some((offset, img_la)) = pass.depth_stencil {
|
||||
Some(vk::AttachmentReference { attachment: offset as u32, layout: img_la as u32, })
|
||||
Some(vk::AttachmentReference {
|
||||
attachment: offset as u32,
|
||||
layout: img_la as u32,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}.into_iter();
|
||||
|
||||
color.chain(input).chain(resolve).chain(depthstencil)
|
||||
}).collect::<SmallVec<[_; 16]>>();
|
||||
})
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
|
||||
// Same as `attachment_references` but only for the preserve attachments.
|
||||
// This is separate because attachment references are u32s and not `vkAttachmentReference`
|
||||
// structs.
|
||||
let preserve_attachments_references = description.subpass_descs().flat_map(|pass| {
|
||||
pass.preserve_attachments.into_iter().map(|offset| offset as u32)
|
||||
}).collect::<SmallVec<[_; 16]>>();
|
||||
let preserve_attachments_references = description
|
||||
.subpass_descs()
|
||||
.flat_map(|pass| {
|
||||
pass.preserve_attachments
|
||||
.into_iter()
|
||||
.map(|offset| offset as u32)
|
||||
})
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
|
||||
// Now iterating over passes.
|
||||
let passes = unsafe {
|
||||
@ -201,7 +253,8 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
ptr::null()
|
||||
};
|
||||
|
||||
let preserve_attachments = preserve_attachments_references.as_ptr()
|
||||
let preserve_attachments = preserve_attachments_references
|
||||
.as_ptr()
|
||||
.offset(preserve_ref_index as isize);
|
||||
preserve_ref_index += pass.preserve_attachments.len();
|
||||
|
||||
@ -209,17 +262,29 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
flags: 0, // reserved
|
||||
pipelineBindPoint: vk::PIPELINE_BIND_POINT_GRAPHICS,
|
||||
inputAttachmentCount: pass.input_attachments.len() as u32,
|
||||
pInputAttachments: if pass.input_attachments.is_empty() { ptr::null() }
|
||||
else { input_attachments },
|
||||
pInputAttachments: if pass.input_attachments.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
input_attachments
|
||||
},
|
||||
colorAttachmentCount: pass.color_attachments.len() as u32,
|
||||
pColorAttachments: if pass.color_attachments.is_empty() { ptr::null() }
|
||||
else { color_attachments },
|
||||
pResolveAttachments: if pass.resolve_attachments.is_empty() { ptr::null() }
|
||||
else { resolve_attachments },
|
||||
pColorAttachments: if pass.color_attachments.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
color_attachments
|
||||
},
|
||||
pResolveAttachments: if pass.resolve_attachments.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
resolve_attachments
|
||||
},
|
||||
pDepthStencilAttachment: depth_stencil,
|
||||
preserveAttachmentCount: pass.preserve_attachments.len() as u32,
|
||||
pPreserveAttachments: if pass.preserve_attachments.is_empty() { ptr::null() }
|
||||
else { preserve_attachments },
|
||||
pPreserveAttachments: if pass.preserve_attachments.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
preserve_attachments
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
@ -231,7 +296,9 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
out
|
||||
};
|
||||
|
||||
let dependencies = description.dependency_descs().map(|dependency| {
|
||||
let dependencies = description
|
||||
.dependency_descs()
|
||||
.map(|dependency| {
|
||||
debug_assert!(dependency.source_subpass < passes.len());
|
||||
debug_assert!(dependency.destination_subpass < passes.len());
|
||||
|
||||
@ -242,9 +309,14 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
dstStageMask: dependency.dst_stages.into(),
|
||||
srcAccessMask: dependency.src_access.into(),
|
||||
dstAccessMask: dependency.dst_access.into(),
|
||||
dependencyFlags: if dependency.by_region { vk::DEPENDENCY_BY_REGION_BIT } else { 0 },
|
||||
dependencyFlags: if dependency.by_region {
|
||||
vk::DEPENDENCY_BY_REGION_BIT
|
||||
} else {
|
||||
0
|
||||
},
|
||||
}
|
||||
}).collect::<SmallVec<[_; 16]>>();
|
||||
})
|
||||
.collect::<SmallVec<[_; 16]>>();
|
||||
|
||||
let render_pass = unsafe {
|
||||
let infos = vk::RenderPassCreateInfo {
|
||||
@ -252,18 +324,30 @@ impl<D> RenderPass<D> where D: RenderPassDesc {
|
||||
pNext: ptr::null(),
|
||||
flags: 0, // reserved
|
||||
attachmentCount: attachments.len() as u32,
|
||||
pAttachments: if attachments.is_empty() { ptr::null() }
|
||||
else { attachments.as_ptr() },
|
||||
pAttachments: if attachments.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
attachments.as_ptr()
|
||||
},
|
||||
subpassCount: passes.len() as u32,
|
||||
pSubpasses: if passes.is_empty() { ptr::null() } else { passes.as_ptr() },
|
||||
pSubpasses: if passes.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
passes.as_ptr()
|
||||
},
|
||||
dependencyCount: dependencies.len() as u32,
|
||||
pDependencies: if dependencies.is_empty() { ptr::null() }
|
||||
else { dependencies.as_ptr() },
|
||||
pDependencies: if dependencies.is_empty() {
|
||||
ptr::null()
|
||||
} else {
|
||||
dependencies.as_ptr()
|
||||
},
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateRenderPass(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateRenderPass(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -281,9 +365,9 @@ impl RenderPass<EmptySinglePassRenderPassDesc> {
|
||||
///
|
||||
/// This method is useful for quick tests.
|
||||
#[inline]
|
||||
pub fn empty_single_pass(device: Arc<Device>)
|
||||
-> Result<RenderPass<EmptySinglePassRenderPassDesc>, RenderPassCreationError>
|
||||
{
|
||||
pub fn empty_single_pass(
|
||||
device: Arc<Device>)
|
||||
-> Result<RenderPass<EmptySinglePassRenderPassDesc>, RenderPassCreationError> {
|
||||
RenderPass::new(device, EmptySinglePassRenderPassDesc)
|
||||
}
|
||||
}
|
||||
@ -304,8 +388,7 @@ impl<D> RenderPass<D> {
|
||||
unsafe {
|
||||
let vk = self.device.pointers();
|
||||
let mut out = mem::uninitialized();
|
||||
vk.GetRenderAreaGranularity(self.device.internal_object(),
|
||||
self.render_pass, &mut out);
|
||||
vk.GetRenderAreaGranularity(self.device.internal_object(), self.render_pass, &mut out);
|
||||
|
||||
debug_assert_ne!(out.width, 0);
|
||||
debug_assert_ne!(out.height, 0);
|
||||
@ -325,7 +408,9 @@ impl<D> RenderPass<D> {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<D> RenderPassDesc for RenderPass<D> where D: RenderPassDesc {
|
||||
unsafe impl<D> RenderPassDesc for RenderPass<D>
|
||||
where D: RenderPassDesc
|
||||
{
|
||||
#[inline]
|
||||
fn num_attachments(&self) -> usize {
|
||||
self.desc.num_attachments()
|
||||
@ -366,7 +451,9 @@ unsafe impl<C, D> RenderPassDescClearValues<C> for RenderPass<D>
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<D> RenderPassAbstract for RenderPass<D> where D: RenderPassDesc {
|
||||
unsafe impl<D> RenderPassAbstract for RenderPass<D>
|
||||
where D: RenderPassDesc
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> RenderPassSys {
|
||||
RenderPassSys(self.render_pass, PhantomData)
|
||||
@ -380,7 +467,9 @@ unsafe impl<D> DeviceOwned for RenderPass<D> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> fmt::Debug for RenderPass<D> where D: fmt::Debug {
|
||||
impl<D> fmt::Debug for RenderPass<D>
|
||||
where D: fmt::Debug
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
fmt.debug_struct("RenderPass")
|
||||
.field("raw", &self.render_pass)
|
||||
@ -437,7 +526,7 @@ impl error::Error for RenderPassCreationError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
RenderPassCreationError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -466,7 +555,7 @@ impl From<Error> for RenderPassCreationError {
|
||||
err @ Error::OutOfDeviceMemory => {
|
||||
RenderPassCreationError::OomError(OomError::from(err))
|
||||
},
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -491,7 +580,8 @@ mod tests {
|
||||
return; // test ignored
|
||||
}
|
||||
|
||||
let rp = single_pass_renderpass! {
|
||||
let rp =
|
||||
single_pass_renderpass! {
|
||||
device.clone(),
|
||||
attachments: {
|
||||
a1: { load: Clear, store: DontCare, format: Format::R8G8B8A8Unorm, samples: 1, },
|
||||
@ -513,7 +603,7 @@ mod tests {
|
||||
|
||||
match rp {
|
||||
Err(RenderPassCreationError::ColorAttachmentsLimitExceeded) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -51,7 +51,10 @@ pub unsafe trait FramebufferAbstract: RenderPassAbstract {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T> FramebufferAbstract for T where T: SafeDeref, T::Target: FramebufferAbstract {
|
||||
unsafe impl<T> FramebufferAbstract for T
|
||||
where T: SafeDeref,
|
||||
T::Target: FramebufferAbstract
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> FramebufferSys {
|
||||
FramebufferAbstract::inner(&**self)
|
||||
@ -99,7 +102,10 @@ pub unsafe trait RenderPassAbstract: DeviceOwned + RenderPassDesc {
|
||||
fn inner(&self) -> RenderPassSys;
|
||||
}
|
||||
|
||||
unsafe impl<T> RenderPassAbstract for T where T: SafeDeref, T::Target: RenderPassAbstract {
|
||||
unsafe impl<T> RenderPassAbstract for T
|
||||
where T: SafeDeref,
|
||||
T::Target: RenderPassAbstract
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> RenderPassSys {
|
||||
(**self).inner()
|
||||
@ -137,7 +143,8 @@ pub unsafe trait RenderPassDescClearValues<C> {
|
||||
}
|
||||
|
||||
unsafe impl<T, C> RenderPassDescClearValues<C> for T
|
||||
where T: SafeDeref, T::Target: RenderPassDescClearValues<C>
|
||||
where T: SafeDeref,
|
||||
T::Target: RenderPassDescClearValues<C>
|
||||
{
|
||||
#[inline]
|
||||
fn convert_clear_values(&self, vals: C) -> Box<Iterator<Item = ClearValue>> {
|
||||
@ -164,10 +171,13 @@ pub unsafe trait RenderPassSubpassInterface<Other: ?Sized>: RenderPassDesc
|
||||
}
|
||||
|
||||
unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
|
||||
where A: RenderPassDesc, B: ShaderInterfaceDef
|
||||
where A: RenderPassDesc,
|
||||
B: ShaderInterfaceDef
|
||||
{
|
||||
fn is_compatible_with(&self, subpass: u32, other: &B) -> bool {
|
||||
let pass_descr = match RenderPassDesc::subpass_descs(self).skip(subpass as usize).next() {
|
||||
let pass_descr = match RenderPassDesc::subpass_descs(self)
|
||||
.skip(subpass as usize)
|
||||
.next() {
|
||||
Some(s) => s,
|
||||
None => return false,
|
||||
};
|
||||
@ -179,7 +189,11 @@ unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
|
||||
None => return false,
|
||||
};
|
||||
|
||||
let attachment_desc = (&self).attachment_descs().skip(attachment_id).next().unwrap();
|
||||
let attachment_desc = (&self)
|
||||
.attachment_descs()
|
||||
.skip(attachment_id)
|
||||
.next()
|
||||
.unwrap();
|
||||
|
||||
// FIXME: compare formats depending on the number of components and data type
|
||||
/*if attachment_desc.format != element.format {
|
||||
@ -201,7 +215,9 @@ unsafe impl<A, B: ?Sized> RenderPassSubpassInterface<B> for A
|
||||
// TODO: once specialization lands, this trait can be specialized for pairs that are known to
|
||||
// always be compatible
|
||||
// TODO: maybe this can be unimplemented on some pairs, to provide compile-time checks?
|
||||
pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc where Other: RenderPassDesc {
|
||||
pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc
|
||||
where Other: RenderPassDesc
|
||||
{
|
||||
/// Returns `true` if this layout is compatible with the other layout, as defined in the
|
||||
/// `Render Pass Compatibility` section of the Vulkan specs.
|
||||
// TODO: return proper error
|
||||
@ -209,7 +225,8 @@ pub unsafe trait RenderPassCompatible<Other: ?Sized>: RenderPassDesc where Other
|
||||
}
|
||||
|
||||
unsafe impl<A, B: ?Sized> RenderPassCompatible<B> for A
|
||||
where A: RenderPassDesc, B: RenderPassDesc
|
||||
where A: RenderPassDesc,
|
||||
B: RenderPassDesc
|
||||
{
|
||||
fn is_compatible_with(&self, other: &B) -> bool {
|
||||
// FIXME:
|
||||
@ -237,7 +254,9 @@ pub struct Subpass<L> {
|
||||
subpass_id: u32,
|
||||
}
|
||||
|
||||
impl<L> Subpass<L> where L: RenderPassDesc {
|
||||
impl<L> Subpass<L>
|
||||
where L: RenderPassDesc
|
||||
{
|
||||
/// Returns a handle that represents a subpass of a render pass.
|
||||
#[inline]
|
||||
pub fn from(render_pass: L, id: u32) -> Option<Subpass<L>> {
|
||||
@ -255,7 +274,9 @@ impl<L> Subpass<L> where L: RenderPassDesc {
|
||||
/// Returns the number of color attachments in this subpass.
|
||||
#[inline]
|
||||
pub fn num_color_attachments(&self) -> u32 {
|
||||
self.render_pass.num_color_attachments(self.subpass_id).unwrap()
|
||||
self.render_pass
|
||||
.num_color_attachments(self.subpass_id)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Returns true if the subpass has a depth attachment or a depth-stencil attachment.
|
||||
@ -268,7 +289,9 @@ impl<L> Subpass<L> where L: RenderPassDesc {
|
||||
/// layout is not `DepthStencilReadOnlyOptimal`.
|
||||
#[inline]
|
||||
pub fn has_writable_depth(&self) -> bool {
|
||||
self.render_pass.has_writable_depth(self.subpass_id).unwrap()
|
||||
self.render_pass
|
||||
.has_writable_depth(self.subpass_id)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Returns true if the subpass has a stencil attachment or a depth-stencil attachment.
|
||||
@ -281,14 +304,18 @@ impl<L> Subpass<L> where L: RenderPassDesc {
|
||||
/// layout is not `DepthStencilReadOnlyOptimal`.
|
||||
#[inline]
|
||||
pub fn has_writable_stencil(&self) -> bool {
|
||||
self.render_pass.has_writable_stencil(self.subpass_id).unwrap()
|
||||
self.render_pass
|
||||
.has_writable_stencil(self.subpass_id)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Returns true if the subpass has any color or depth/stencil attachment.
|
||||
#[inline]
|
||||
pub fn has_color_or_depth_stencil_attachment(&self) -> bool {
|
||||
self.num_color_attachments() >= 1 ||
|
||||
self.render_pass.has_depth_stencil_attachment(self.subpass_id).unwrap() != (false, false)
|
||||
self.render_pass
|
||||
.has_depth_stencil_attachment(self.subpass_id)
|
||||
.unwrap() != (false, false)
|
||||
}
|
||||
|
||||
/// Returns the number of samples in the color and/or depth/stencil attachments. Returns `None`
|
||||
|
@ -21,10 +21,10 @@ use format::FormatDesc;
|
||||
use format::FormatTy;
|
||||
use image::Dimensions;
|
||||
use image::ImageDimensions;
|
||||
use image::ViewType;
|
||||
use image::sys::ImageCreationError;
|
||||
use image::ImageLayout;
|
||||
use image::ImageUsage;
|
||||
use image::ViewType;
|
||||
use image::sys::ImageCreationError;
|
||||
use image::sys::UnsafeImage;
|
||||
use image::sys::UnsafeImageView;
|
||||
use image::traits::ImageAccess;
|
||||
@ -158,7 +158,8 @@ impl<F> AttachmentImage<F> {
|
||||
/// > **Note**: You can also use this function and pass `1` for the number of samples if you
|
||||
/// > want a regular image.
|
||||
#[inline]
|
||||
pub fn transient_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32, format: F)
|
||||
pub fn transient_multisampled(device: Arc<Device>, dimensions: [u32; 2], samples: u32,
|
||||
format: F)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
@ -171,7 +172,8 @@ impl<F> AttachmentImage<F> {
|
||||
}
|
||||
|
||||
fn new_impl(device: Arc<Device>, dimensions: [u32; 2], format: F, base_usage: ImageUsage,
|
||||
samples: u32) -> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
samples: u32)
|
||||
-> Result<Arc<AttachmentImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc
|
||||
{
|
||||
// TODO: check dimensions against the max_framebuffer_width/height/layers limits
|
||||
@ -181,7 +183,7 @@ impl<F> AttachmentImage<F> {
|
||||
FormatTy::DepthStencil => true,
|
||||
FormatTy::Stencil => true,
|
||||
FormatTy::Compressed => panic!(),
|
||||
_ => false
|
||||
_ => false,
|
||||
};
|
||||
|
||||
let usage = ImageUsage {
|
||||
@ -195,38 +197,55 @@ impl<F> AttachmentImage<F> {
|
||||
width: dimensions[0],
|
||||
height: dimensions[1],
|
||||
array_layers: 1,
|
||||
cubemap_compatible: false
|
||||
cubemap_compatible: false,
|
||||
};
|
||||
|
||||
try!(UnsafeImage::new(device.clone(), usage, format.format(), dims,
|
||||
samples, 1, Sharing::Exclusive::<Empty<u32>>, false, false))
|
||||
UnsafeImage::new(device.clone(),
|
||||
usage,
|
||||
format.format(),
|
||||
dims,
|
||||
samples,
|
||||
1,
|
||||
Sharing::Exclusive::<Empty<u32>>,
|
||||
false,
|
||||
false)?
|
||||
};
|
||||
|
||||
let mem_ty = {
|
||||
let device_local = device.physical_device().memory_types()
|
||||
let device_local = device
|
||||
.physical_device()
|
||||
.memory_types()
|
||||
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
|
||||
.filter(|t| t.is_device_local());
|
||||
let any = device.physical_device().memory_types()
|
||||
let any = device
|
||||
.physical_device()
|
||||
.memory_types()
|
||||
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
|
||||
device_local.chain(any).next().unwrap()
|
||||
};
|
||||
|
||||
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
|
||||
mem_reqs.size, mem_reqs.alignment, AllocLayout::Optimal));
|
||||
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
|
||||
mem_ty,
|
||||
mem_reqs.size,
|
||||
mem_reqs.alignment,
|
||||
AllocLayout::Optimal)?;
|
||||
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe { try!(image.bind_memory(mem.memory(), mem.offset())); }
|
||||
unsafe {
|
||||
image.bind_memory(mem.memory(), mem.offset())?;
|
||||
}
|
||||
|
||||
let view = unsafe {
|
||||
try!(UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1))
|
||||
};
|
||||
let view = unsafe { UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1)? };
|
||||
|
||||
Ok(Arc::new(AttachmentImage {
|
||||
image: image,
|
||||
view: view,
|
||||
memory: mem,
|
||||
format: format,
|
||||
attachment_layout: if is_depth { ImageLayout::DepthStencilAttachmentOptimal }
|
||||
else { ImageLayout::ColorAttachmentOptimal },
|
||||
attachment_layout: if is_depth {
|
||||
ImageLayout::DepthStencilAttachmentOptimal
|
||||
} else {
|
||||
ImageLayout::ColorAttachmentOptimal
|
||||
},
|
||||
gpu_lock: AtomicUsize::new(0),
|
||||
}))
|
||||
}
|
||||
@ -315,7 +334,10 @@ unsafe impl<F, A> ImageViewAccess for AttachmentImage<F, A>
|
||||
#[inline]
|
||||
fn dimensions(&self) -> Dimensions {
|
||||
let dims = self.image.dimensions();
|
||||
Dimensions::Dim2d { width: dims.width(), height: dims.height() }
|
||||
Dimensions::Dim2d {
|
||||
width: dims.width(),
|
||||
height: dims.height(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -7,8 +7,8 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use std::sync::Arc;
|
||||
use smallvec::SmallVec;
|
||||
use std::sync::Arc;
|
||||
|
||||
use device::Device;
|
||||
use device::Queue;
|
||||
@ -16,10 +16,10 @@ use format::Format;
|
||||
use format::FormatDesc;
|
||||
use image::Dimensions;
|
||||
use image::ImageDimensions;
|
||||
use image::MipmapsCount;
|
||||
use image::sys::ImageCreationError;
|
||||
use image::ImageLayout;
|
||||
use image::ImageUsage;
|
||||
use image::MipmapsCount;
|
||||
use image::sys::ImageCreationError;
|
||||
use image::sys::UnsafeImage;
|
||||
use image::sys::UnsafeImageView;
|
||||
use image::traits::ImageAccess;
|
||||
@ -37,7 +37,9 @@ use sync::Sharing;
|
||||
/// but then you must only ever read from it. TODO: clarify because of blit operations
|
||||
// TODO: type (2D, 3D, array, etc.) as template parameter
|
||||
#[derive(Debug)]
|
||||
pub struct ImmutableImage<F, A = Arc<StdMemoryPool>> where A: MemoryPool {
|
||||
pub struct ImmutableImage<F, A = Arc<StdMemoryPool>>
|
||||
where A: MemoryPool
|
||||
{
|
||||
image: UnsafeImage,
|
||||
view: UnsafeImageView,
|
||||
dimensions: Dimensions,
|
||||
@ -51,16 +53,23 @@ impl<F> ImmutableImage<F> {
|
||||
#[inline]
|
||||
pub fn new<'a, I>(device: Arc<Device>, dimensions: Dimensions, format: F, queue_families: I)
|
||||
-> Result<Arc<ImmutableImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc, I: IntoIterator<Item = QueueFamily<'a>>
|
||||
where F: FormatDesc,
|
||||
I: IntoIterator<Item = QueueFamily<'a>>
|
||||
{
|
||||
ImmutableImage::with_mipmaps(device, dimensions, format, MipmapsCount::One, queue_families)
|
||||
ImmutableImage::with_mipmaps(device,
|
||||
dimensions,
|
||||
format,
|
||||
MipmapsCount::One,
|
||||
queue_families)
|
||||
}
|
||||
|
||||
/// Builds a new immutable image with the given number of mipmaps.
|
||||
pub fn with_mipmaps<'a, I, M>(device: Arc<Device>, dimensions: Dimensions, format: F,
|
||||
mipmaps: M, queue_families: I)
|
||||
-> Result<Arc<ImmutableImage<F>>, ImageCreationError>
|
||||
where F: FormatDesc, I: IntoIterator<Item = QueueFamily<'a>>, M: Into<MipmapsCount>
|
||||
where F: FormatDesc,
|
||||
I: IntoIterator<Item = QueueFamily<'a>>,
|
||||
M: Into<MipmapsCount>
|
||||
{
|
||||
let usage = ImageUsage {
|
||||
transfer_source: true, // for blits
|
||||
@ -69,7 +78,9 @@ impl<F> ImmutableImage<F> {
|
||||
..ImageUsage::none()
|
||||
};
|
||||
|
||||
let queue_families = queue_families.into_iter().map(|f| f.id())
|
||||
let queue_families = queue_families
|
||||
.into_iter()
|
||||
.map(|f| f.id())
|
||||
.collect::<SmallVec<[u32; 4]>>();
|
||||
|
||||
let (image, mem_reqs) = unsafe {
|
||||
@ -79,27 +90,45 @@ impl<F> ImmutableImage<F> {
|
||||
Sharing::Exclusive
|
||||
};
|
||||
|
||||
try!(UnsafeImage::new(device.clone(), usage, format.format(), dimensions.to_image_dimensions(),
|
||||
1, mipmaps, sharing, false, false))
|
||||
UnsafeImage::new(device.clone(),
|
||||
usage,
|
||||
format.format(),
|
||||
dimensions.to_image_dimensions(),
|
||||
1,
|
||||
mipmaps,
|
||||
sharing,
|
||||
false,
|
||||
false)?
|
||||
};
|
||||
|
||||
let mem_ty = {
|
||||
let device_local = device.physical_device().memory_types()
|
||||
let device_local = device
|
||||
.physical_device()
|
||||
.memory_types()
|
||||
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
|
||||
.filter(|t| t.is_device_local());
|
||||
let any = device.physical_device().memory_types()
|
||||
let any = device
|
||||
.physical_device()
|
||||
.memory_types()
|
||||
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
|
||||
device_local.chain(any).next().unwrap()
|
||||
};
|
||||
|
||||
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
|
||||
mem_reqs.size, mem_reqs.alignment, AllocLayout::Optimal));
|
||||
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
|
||||
mem_ty,
|
||||
mem_reqs.size,
|
||||
mem_reqs.alignment,
|
||||
AllocLayout::Optimal)?;
|
||||
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe { try!(image.bind_memory(mem.memory(), mem.offset())); }
|
||||
unsafe {
|
||||
image.bind_memory(mem.memory(), mem.offset())?;
|
||||
}
|
||||
|
||||
let view = unsafe {
|
||||
try!(UnsafeImageView::raw(&image, dimensions.to_view_type(), 0 .. image.mipmap_levels(),
|
||||
0 .. image.dimensions().array_layers()))
|
||||
UnsafeImageView::raw(&image,
|
||||
dimensions.to_view_type(),
|
||||
0 .. image.mipmap_levels(),
|
||||
0 .. image.dimensions().array_layers())?
|
||||
};
|
||||
|
||||
Ok(Arc::new(ImmutableImage {
|
||||
@ -112,7 +141,9 @@ impl<F> ImmutableImage<F> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, A> ImmutableImage<F, A> where A: MemoryPool {
|
||||
impl<F, A> ImmutableImage<F, A>
|
||||
where A: MemoryPool
|
||||
{
|
||||
/// Returns the dimensions of the image.
|
||||
#[inline]
|
||||
pub fn dimensions(&self) -> Dimensions {
|
||||
@ -126,7 +157,10 @@ impl<F, A> ImmutableImage<F, A> where A: MemoryPool {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<F, A> ImageAccess for ImmutableImage<F, A> where F: 'static + Send + Sync, A: MemoryPool {
|
||||
unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
|
||||
where F: 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> &UnsafeImage {
|
||||
&self.image
|
||||
@ -164,7 +198,8 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A> where F: 'static + Send +
|
||||
}
|
||||
|
||||
unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
|
||||
where F: 'static + Send + Sync, A: MemoryPool
|
||||
where F: 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn matches_format(&self) -> bool {
|
||||
@ -173,7 +208,8 @@ unsafe impl<P, F, A> ImageContent<P> for ImmutableImage<F, A>
|
||||
}
|
||||
|
||||
unsafe impl<F: 'static, A> ImageViewAccess for ImmutableImage<F, A>
|
||||
where F: 'static + Send + Sync, A: MemoryPool
|
||||
where F: 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn parent(&self) -> &ImageAccess {
|
||||
|
@ -144,7 +144,11 @@ pub enum Dimensions {
|
||||
Dim1d { width: u32 },
|
||||
Dim1dArray { width: u32, array_layers: u32 },
|
||||
Dim2d { width: u32, height: u32 },
|
||||
Dim2dArray { width: u32, height: u32, array_layers: u32 },
|
||||
Dim2dArray {
|
||||
width: u32,
|
||||
height: u32,
|
||||
array_layers: u32,
|
||||
},
|
||||
Dim3d { width: u32, height: u32, depth: u32 },
|
||||
Cubemap { size: u32 },
|
||||
CubemapArray { size: u32, array_layers: u32 },
|
||||
@ -231,29 +235,66 @@ impl Dimensions {
|
||||
pub fn to_image_dimensions(&self) -> ImageDimensions {
|
||||
match *self {
|
||||
Dimensions::Dim1d { width } => {
|
||||
ImageDimensions::Dim1d { width: width, array_layers: 1 }
|
||||
ImageDimensions::Dim1d {
|
||||
width: width,
|
||||
array_layers: 1,
|
||||
}
|
||||
},
|
||||
Dimensions::Dim1dArray { width, array_layers } => {
|
||||
ImageDimensions::Dim1d { width: width, array_layers: array_layers }
|
||||
Dimensions::Dim1dArray {
|
||||
width,
|
||||
array_layers,
|
||||
} => {
|
||||
ImageDimensions::Dim1d {
|
||||
width: width,
|
||||
array_layers: array_layers,
|
||||
}
|
||||
},
|
||||
Dimensions::Dim2d { width, height } => {
|
||||
ImageDimensions::Dim2d { width: width, height: height, array_layers: 1,
|
||||
cubemap_compatible: false }
|
||||
ImageDimensions::Dim2d {
|
||||
width: width,
|
||||
height: height,
|
||||
array_layers: 1,
|
||||
cubemap_compatible: false,
|
||||
}
|
||||
},
|
||||
Dimensions::Dim2dArray { width, height, array_layers } => {
|
||||
ImageDimensions::Dim2d { width: width, height: height,
|
||||
array_layers: array_layers, cubemap_compatible: false }
|
||||
Dimensions::Dim2dArray {
|
||||
width,
|
||||
height,
|
||||
array_layers,
|
||||
} => {
|
||||
ImageDimensions::Dim2d {
|
||||
width: width,
|
||||
height: height,
|
||||
array_layers: array_layers,
|
||||
cubemap_compatible: false,
|
||||
}
|
||||
},
|
||||
Dimensions::Dim3d { width, height, depth } => {
|
||||
ImageDimensions::Dim3d { width: width, height: height, depth: depth }
|
||||
Dimensions::Dim3d {
|
||||
width,
|
||||
height,
|
||||
depth,
|
||||
} => {
|
||||
ImageDimensions::Dim3d {
|
||||
width: width,
|
||||
height: height,
|
||||
depth: depth,
|
||||
}
|
||||
},
|
||||
Dimensions::Cubemap { size } => {
|
||||
ImageDimensions::Dim2d { width: size, height: size, array_layers: 6,
|
||||
cubemap_compatible: true }
|
||||
ImageDimensions::Dim2d {
|
||||
width: size,
|
||||
height: size,
|
||||
array_layers: 6,
|
||||
cubemap_compatible: true,
|
||||
}
|
||||
},
|
||||
Dimensions::CubemapArray { size, array_layers } => {
|
||||
ImageDimensions::Dim2d { width: size, height: size, array_layers: array_layers * 6,
|
||||
cubemap_compatible: true }
|
||||
ImageDimensions::Dim2d {
|
||||
width: size,
|
||||
height: size,
|
||||
array_layers: array_layers * 6,
|
||||
cubemap_compatible: true,
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -287,8 +328,13 @@ pub enum ViewType {
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
pub enum ImageDimensions {
|
||||
Dim1d { width: u32, array_layers: u32 },
|
||||
Dim2d { width: u32, height: u32, array_layers: u32, cubemap_compatible: bool },
|
||||
Dim3d { width: u32, height: u32, depth: u32 }
|
||||
Dim2d {
|
||||
width: u32,
|
||||
height: u32,
|
||||
array_layers: u32,
|
||||
cubemap_compatible: bool,
|
||||
},
|
||||
Dim3d { width: u32, height: u32, depth: u32 },
|
||||
}
|
||||
|
||||
impl ImageDimensions {
|
||||
|
@ -7,23 +7,23 @@
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::iter::Empty;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use device::Device;
|
||||
use device::Queue;
|
||||
use format::ClearValue;
|
||||
use format::Format;
|
||||
use format::FormatDesc;
|
||||
use format::FormatTy;
|
||||
use format::Format;
|
||||
use image::Dimensions;
|
||||
use image::ImageDimensions;
|
||||
use image::sys::ImageCreationError;
|
||||
use image::ImageLayout;
|
||||
use image::ImageUsage;
|
||||
use image::sys::ImageCreationError;
|
||||
use image::sys::UnsafeImage;
|
||||
use image::sys::UnsafeImageView;
|
||||
use image::traits::ImageAccess;
|
||||
@ -41,7 +41,9 @@ use sync::Sharing;
|
||||
/// General-purpose image in device memory. Can be used for any usage, but will be slower than a
|
||||
/// specialized image.
|
||||
#[derive(Debug)]
|
||||
pub struct StorageImage<F, A = Arc<StdMemoryPool>> where A: MemoryPool {
|
||||
pub struct StorageImage<F, A = Arc<StdMemoryPool>>
|
||||
where A: MemoryPool
|
||||
{
|
||||
// Inner implementation.
|
||||
image: UnsafeImage,
|
||||
|
||||
@ -76,7 +78,7 @@ impl<F> StorageImage<F> {
|
||||
FormatTy::DepthStencil => true,
|
||||
FormatTy::Stencil => true,
|
||||
FormatTy::Compressed => panic!(),
|
||||
_ => false
|
||||
_ => false,
|
||||
};
|
||||
|
||||
let usage = ImageUsage {
|
||||
@ -90,7 +92,9 @@ impl<F> StorageImage<F> {
|
||||
transient_attachment: false,
|
||||
};
|
||||
|
||||
let queue_families = queue_families.into_iter().map(|f| f.id())
|
||||
let queue_families = queue_families
|
||||
.into_iter()
|
||||
.map(|f| f.id())
|
||||
.collect::<SmallVec<[u32; 4]>>();
|
||||
|
||||
let (image, mem_reqs) = unsafe {
|
||||
@ -100,27 +104,45 @@ impl<F> StorageImage<F> {
|
||||
Sharing::Exclusive
|
||||
};
|
||||
|
||||
try!(UnsafeImage::new(device.clone(), usage, format.format(), dimensions.to_image_dimensions(),
|
||||
1, 1, Sharing::Exclusive::<Empty<u32>>, false, false))
|
||||
UnsafeImage::new(device.clone(),
|
||||
usage,
|
||||
format.format(),
|
||||
dimensions.to_image_dimensions(),
|
||||
1,
|
||||
1,
|
||||
Sharing::Exclusive::<Empty<u32>>,
|
||||
false,
|
||||
false)?
|
||||
};
|
||||
|
||||
let mem_ty = {
|
||||
let device_local = device.physical_device().memory_types()
|
||||
let device_local = device
|
||||
.physical_device()
|
||||
.memory_types()
|
||||
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
|
||||
.filter(|t| t.is_device_local());
|
||||
let any = device.physical_device().memory_types()
|
||||
let any = device
|
||||
.physical_device()
|
||||
.memory_types()
|
||||
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
|
||||
device_local.chain(any).next().unwrap()
|
||||
};
|
||||
|
||||
let mem = try!(MemoryPool::alloc(&Device::standard_pool(&device), mem_ty,
|
||||
mem_reqs.size, mem_reqs.alignment, AllocLayout::Optimal));
|
||||
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
|
||||
mem_ty,
|
||||
mem_reqs.size,
|
||||
mem_reqs.alignment,
|
||||
AllocLayout::Optimal)?;
|
||||
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe { try!(image.bind_memory(mem.memory(), mem.offset())); }
|
||||
unsafe {
|
||||
image.bind_memory(mem.memory(), mem.offset())?;
|
||||
}
|
||||
|
||||
let view = unsafe {
|
||||
try!(UnsafeImageView::raw(&image, dimensions.to_view_type(), 0 .. image.mipmap_levels(),
|
||||
0 .. image.dimensions().array_layers()))
|
||||
UnsafeImageView::raw(&image,
|
||||
dimensions.to_view_type(),
|
||||
0 .. image.mipmap_levels(),
|
||||
0 .. image.dimensions().array_layers())?
|
||||
};
|
||||
|
||||
Ok(Arc::new(StorageImage {
|
||||
@ -135,7 +157,9 @@ impl<F> StorageImage<F> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, A> StorageImage<F, A> where A: MemoryPool {
|
||||
impl<F, A> StorageImage<F, A>
|
||||
where A: MemoryPool
|
||||
{
|
||||
/// Returns the dimensions of the image.
|
||||
#[inline]
|
||||
pub fn dimensions(&self) -> Dimensions {
|
||||
@ -143,7 +167,10 @@ impl<F, A> StorageImage<F, A> where A: MemoryPool {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<F, A> ImageAccess for StorageImage<F, A> where F: 'static + Send + Sync, A: MemoryPool {
|
||||
unsafe impl<F, A> ImageAccess for StorageImage<F, A>
|
||||
where F: 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> &UnsafeImage {
|
||||
&self.image
|
||||
@ -188,7 +215,8 @@ unsafe impl<F, A> ImageAccess for StorageImage<F, A> where F: 'static + Send + S
|
||||
}
|
||||
|
||||
unsafe impl<F, A> ImageClearValue<F::ClearValue> for StorageImage<F, A>
|
||||
where F: FormatDesc + 'static + Send + Sync, A: MemoryPool
|
||||
where F: FormatDesc + 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, value: F::ClearValue) -> Option<ClearValue> {
|
||||
@ -197,7 +225,8 @@ unsafe impl<F, A> ImageClearValue<F::ClearValue> for StorageImage<F, A>
|
||||
}
|
||||
|
||||
unsafe impl<P, F, A> ImageContent<P> for StorageImage<F, A>
|
||||
where F: 'static + Send + Sync, A: MemoryPool
|
||||
where F: 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn matches_format(&self) -> bool {
|
||||
@ -206,7 +235,8 @@ unsafe impl<P, F, A> ImageContent<P> for StorageImage<F, A>
|
||||
}
|
||||
|
||||
unsafe impl<F, A> ImageViewAccess for StorageImage<F, A>
|
||||
where F: 'static + Send + Sync, A: MemoryPool
|
||||
where F: 'static + Send + Sync,
|
||||
A: MemoryPool
|
||||
{
|
||||
#[inline]
|
||||
fn parent(&self) -> &ImageAccess {
|
||||
@ -258,7 +288,13 @@ mod tests {
|
||||
#[test]
|
||||
fn create() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let _img = StorageImage::new(device, Dimensions::Dim2d { width: 32, height: 32 },
|
||||
Format::R8G8B8A8Unorm, Some(queue.family())).unwrap();
|
||||
let _img = StorageImage::new(device,
|
||||
Dimensions::Dim2d {
|
||||
width: 32,
|
||||
height: 32,
|
||||
},
|
||||
Format::R8G8B8A8Unorm,
|
||||
Some(queue.family()))
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
@ -13,16 +13,16 @@ use device::Queue;
|
||||
use format::ClearValue;
|
||||
use format::Format;
|
||||
use format::FormatDesc;
|
||||
use image::ImageDimensions;
|
||||
use image::Dimensions;
|
||||
use image::ImageDimensions;
|
||||
use image::ImageLayout;
|
||||
use image::ViewType;
|
||||
use image::sys::UnsafeImage;
|
||||
use image::sys::UnsafeImageView;
|
||||
use image::traits::ImageAccess;
|
||||
use image::traits::ImageClearValue;
|
||||
use image::traits::ImageContent;
|
||||
use image::traits::ImageViewAccess;
|
||||
use image::ImageLayout;
|
||||
use image::sys::UnsafeImage;
|
||||
use image::sys::UnsafeImageView;
|
||||
use swapchain::Swapchain;
|
||||
use sync::AccessError;
|
||||
|
||||
@ -53,10 +53,9 @@ impl SwapchainImage {
|
||||
///
|
||||
/// This is an internal method that you shouldn't call.
|
||||
pub unsafe fn from_raw(swapchain: Arc<Swapchain>, id: usize)
|
||||
-> Result<Arc<SwapchainImage>, OomError>
|
||||
{
|
||||
-> Result<Arc<SwapchainImage>, OomError> {
|
||||
let image = swapchain.raw_image(id).unwrap();
|
||||
let view = try!(UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1));
|
||||
let view = UnsafeImageView::raw(&image, ViewType::Dim2d, 0 .. 1, 0 .. 1)?;
|
||||
|
||||
Ok(Arc::new(SwapchainImage {
|
||||
swapchain: swapchain.clone(),
|
||||
@ -122,8 +121,7 @@ unsafe impl ImageAccess for SwapchainImage {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl ImageClearValue<<Format as FormatDesc>::ClearValue> for SwapchainImage
|
||||
{
|
||||
unsafe impl ImageClearValue<<Format as FormatDesc>::ClearValue> for SwapchainImage {
|
||||
#[inline]
|
||||
fn decode(&self, value: <Format as FormatDesc>::ClearValue) -> Option<ClearValue> {
|
||||
Some(self.swapchain.format().decode_clear_value(value))
|
||||
@ -146,7 +144,10 @@ unsafe impl ImageViewAccess for SwapchainImage {
|
||||
#[inline]
|
||||
fn dimensions(&self) -> Dimensions {
|
||||
let dims = self.swapchain.dimensions();
|
||||
Dimensions::Dim2d { width: dims[0], height: dims[1] }
|
||||
Dimensions::Dim2d {
|
||||
width: dims[0],
|
||||
height: dims[1],
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -13,13 +13,13 @@
|
||||
//! other image or image view types of this library, and all custom image or image view types
|
||||
//! that you create must wrap around the types in this module.
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use device::Device;
|
||||
use format::Format;
|
||||
@ -83,15 +83,23 @@ impl UnsafeImage {
|
||||
sharing: Sharing<I>, linear_tiling: bool,
|
||||
preinitialized_layout: bool)
|
||||
-> Result<(UnsafeImage, MemoryRequirements), ImageCreationError>
|
||||
where Mi: Into<MipmapsCount>, I: Iterator<Item = u32>
|
||||
where Mi: Into<MipmapsCount>,
|
||||
I: Iterator<Item = u32>
|
||||
{
|
||||
let sharing = match sharing {
|
||||
Sharing::Exclusive => (vk::SHARING_MODE_EXCLUSIVE, SmallVec::<[u32; 8]>::new()),
|
||||
Sharing::Concurrent(ids) => (vk::SHARING_MODE_CONCURRENT, ids.collect()),
|
||||
};
|
||||
|
||||
UnsafeImage::new_impl(device, usage, format, dimensions, num_samples, mipmaps.into(),
|
||||
sharing, linear_tiling, preinitialized_layout)
|
||||
UnsafeImage::new_impl(device,
|
||||
usage,
|
||||
format,
|
||||
dimensions,
|
||||
num_samples,
|
||||
mipmaps.into(),
|
||||
sharing,
|
||||
linear_tiling,
|
||||
preinitialized_layout)
|
||||
}
|
||||
|
||||
// Non-templated version to avoid inlining and improve compile times.
|
||||
@ -99,8 +107,7 @@ impl UnsafeImage {
|
||||
dimensions: ImageDimensions, num_samples: u32, mipmaps: MipmapsCount,
|
||||
(sh_mode, sh_indices): (vk::SharingMode, SmallVec<[u32; 8]>),
|
||||
linear_tiling: bool, preinitialized_layout: bool)
|
||||
-> Result<(UnsafeImage, MemoryRequirements), ImageCreationError>
|
||||
{
|
||||
-> Result<(UnsafeImage, MemoryRequirements), ImageCreationError> {
|
||||
// TODO: doesn't check that the proper features are enabled
|
||||
|
||||
let vk = device.pointers();
|
||||
@ -132,17 +139,27 @@ impl UnsafeImage {
|
||||
if usage.color_attachment && (features & vk::FORMAT_FEATURE_COLOR_ATTACHMENT_BIT == 0) {
|
||||
return Err(ImageCreationError::UnsupportedUsage);
|
||||
}
|
||||
if usage.depth_stencil_attachment && (features & vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT == 0) {
|
||||
if usage.depth_stencil_attachment &&
|
||||
(features & vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT == 0)
|
||||
{
|
||||
return Err(ImageCreationError::UnsupportedUsage);
|
||||
}
|
||||
if usage.input_attachment && (features & (vk::FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
|
||||
if usage.input_attachment &&
|
||||
(features &
|
||||
(vk::FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
|
||||
vk::FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0)
|
||||
{
|
||||
return Err(ImageCreationError::UnsupportedUsage);
|
||||
}
|
||||
if device.loaded_extensions().khr_maintenance1 {
|
||||
if usage.transfer_source && (features & vk::FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR == 0) {
|
||||
if usage.transfer_source &&
|
||||
(features & vk::FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR == 0)
|
||||
{
|
||||
return Err(ImageCreationError::UnsupportedUsage);
|
||||
}
|
||||
if usage.transfer_dest && (features & vk::FORMAT_FEATURE_TRANSFER_DST_BIT_KHR == 0) {
|
||||
if usage.transfer_dest &&
|
||||
(features & vk::FORMAT_FEATURE_TRANSFER_DST_BIT_KHR == 0)
|
||||
{
|
||||
return Err(ImageCreationError::UnsupportedUsage);
|
||||
}
|
||||
}
|
||||
@ -181,7 +198,11 @@ impl UnsafeImage {
|
||||
ImageDimensions::Dim2d { width, height, .. } => {
|
||||
if width < height { width } else { height }
|
||||
},
|
||||
ImageDimensions::Dim3d { width, height, depth } => {
|
||||
ImageDimensions::Dim3d {
|
||||
width,
|
||||
height,
|
||||
depth,
|
||||
} => {
|
||||
if width < height {
|
||||
if depth < width { depth } else { width }
|
||||
} else {
|
||||
@ -198,11 +219,13 @@ impl UnsafeImage {
|
||||
MipmapsCount::Specific(num) => {
|
||||
if num < 1 {
|
||||
return Err(ImageCreationError::InvalidMipmapsCount {
|
||||
obtained: num, valid_range: 1 .. max_mipmaps + 1
|
||||
obtained: num,
|
||||
valid_range: 1 .. max_mipmaps + 1,
|
||||
});
|
||||
} else if num > max_mipmaps {
|
||||
capabilities_error = Some(ImageCreationError::InvalidMipmapsCount {
|
||||
obtained: num, valid_range: 1 .. max_mipmaps + 1
|
||||
obtained: num,
|
||||
valid_range: 1 .. max_mipmaps + 1,
|
||||
});
|
||||
}
|
||||
|
||||
@ -225,55 +248,79 @@ impl UnsafeImage {
|
||||
if usage.sampled {
|
||||
match format.ty() {
|
||||
FormatTy::Float | FormatTy::Compressed => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.sampled_image_color_sample_counts();
|
||||
},
|
||||
FormatTy::Uint | FormatTy::Sint => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.sampled_image_integer_sample_counts();
|
||||
},
|
||||
FormatTy::Depth => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.sampled_image_depth_sample_counts();
|
||||
},
|
||||
FormatTy::Stencil => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.sampled_image_stencil_sample_counts();
|
||||
},
|
||||
FormatTy::DepthStencil => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.sampled_image_depth_sample_counts();
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.sampled_image_stencil_sample_counts();
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if usage.storage {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.storage_image_sample_counts();
|
||||
}
|
||||
|
||||
if usage.color_attachment || usage.depth_stencil_attachment || usage.input_attachment ||
|
||||
usage.transient_attachment
|
||||
if usage.color_attachment || usage.depth_stencil_attachment ||
|
||||
usage.input_attachment || usage.transient_attachment
|
||||
{
|
||||
match format.ty() {
|
||||
FormatTy::Float | FormatTy::Compressed | FormatTy::Uint | FormatTy::Sint => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.framebuffer_color_sample_counts();
|
||||
},
|
||||
FormatTy::Depth => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.framebuffer_depth_sample_counts();
|
||||
},
|
||||
FormatTy::Stencil => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.framebuffer_stencil_sample_counts();
|
||||
},
|
||||
FormatTy::DepthStencil => {
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.framebuffer_depth_sample_counts();
|
||||
supported_samples &= device.physical_device().limits()
|
||||
supported_samples &= device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.framebuffer_stencil_sample_counts();
|
||||
},
|
||||
}
|
||||
@ -295,30 +342,65 @@ impl UnsafeImage {
|
||||
|
||||
// Decoding the dimensions.
|
||||
let (ty, extent, array_layers, flags) = match dimensions {
|
||||
ImageDimensions::Dim1d { width, array_layers } => {
|
||||
ImageDimensions::Dim1d {
|
||||
width,
|
||||
array_layers,
|
||||
} => {
|
||||
if width == 0 || array_layers == 0 {
|
||||
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
|
||||
return Err(ImageCreationError::UnsupportedDimensions {
|
||||
dimensions: dimensions,
|
||||
});
|
||||
}
|
||||
let extent = vk::Extent3D { width: width, height: 1, depth: 1 };
|
||||
let extent = vk::Extent3D {
|
||||
width: width,
|
||||
height: 1,
|
||||
depth: 1,
|
||||
};
|
||||
(vk::IMAGE_TYPE_1D, extent, array_layers, 0)
|
||||
},
|
||||
ImageDimensions::Dim2d { width, height, array_layers, cubemap_compatible } => {
|
||||
ImageDimensions::Dim2d {
|
||||
width,
|
||||
height,
|
||||
array_layers,
|
||||
cubemap_compatible,
|
||||
} => {
|
||||
if width == 0 || height == 0 || array_layers == 0 {
|
||||
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
|
||||
return Err(ImageCreationError::UnsupportedDimensions {
|
||||
dimensions: dimensions,
|
||||
});
|
||||
}
|
||||
if cubemap_compatible && width != height {
|
||||
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
|
||||
return Err(ImageCreationError::UnsupportedDimensions {
|
||||
dimensions: dimensions,
|
||||
});
|
||||
}
|
||||
let extent = vk::Extent3D { width: width, height: height, depth: 1 };
|
||||
let flags = if cubemap_compatible { vk::IMAGE_CREATE_CUBE_COMPATIBLE_BIT }
|
||||
else { 0 };
|
||||
let extent = vk::Extent3D {
|
||||
width: width,
|
||||
height: height,
|
||||
depth: 1,
|
||||
};
|
||||
let flags = if cubemap_compatible {
|
||||
vk::IMAGE_CREATE_CUBE_COMPATIBLE_BIT
|
||||
} else {
|
||||
0
|
||||
};
|
||||
(vk::IMAGE_TYPE_2D, extent, array_layers, flags)
|
||||
},
|
||||
ImageDimensions::Dim3d { width, height, depth } => {
|
||||
ImageDimensions::Dim3d {
|
||||
width,
|
||||
height,
|
||||
depth,
|
||||
} => {
|
||||
if width == 0 || height == 0 || depth == 0 {
|
||||
return Err(ImageCreationError::UnsupportedDimensions { dimensions: dimensions });
|
||||
return Err(ImageCreationError::UnsupportedDimensions {
|
||||
dimensions: dimensions,
|
||||
});
|
||||
}
|
||||
let extent = vk::Extent3D { width: width, height: height, depth: depth };
|
||||
let extent = vk::Extent3D {
|
||||
width: width,
|
||||
height: height,
|
||||
depth: depth,
|
||||
};
|
||||
(vk::IMAGE_TYPE_3D, extent, 1, 0)
|
||||
},
|
||||
};
|
||||
@ -346,7 +428,8 @@ impl UnsafeImage {
|
||||
let limit = device.physical_device().limits().max_image_dimension_cube();
|
||||
debug_assert_eq!(extent.width, extent.height); // checked above
|
||||
if extent.width > limit {
|
||||
let err = ImageCreationError::UnsupportedDimensions { dimensions: dimensions };
|
||||
let err =
|
||||
ImageCreationError::UnsupportedDimensions { dimensions: dimensions };
|
||||
capabilities_error = Some(err);
|
||||
}
|
||||
}
|
||||
@ -358,7 +441,7 @@ impl UnsafeImage {
|
||||
capabilities_error = Some(err);
|
||||
}
|
||||
},
|
||||
_ => unreachable!()
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let usage = usage.to_usage_bits();
|
||||
@ -374,19 +457,26 @@ impl UnsafeImage {
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
let physical_device = device.physical_device().internal_object();
|
||||
let r = vk_i.GetPhysicalDeviceImageFormatProperties(physical_device, format as u32, ty,
|
||||
tiling, usage, 0 /* TODO */,
|
||||
let r = vk_i.GetPhysicalDeviceImageFormatProperties(physical_device,
|
||||
format as u32,
|
||||
ty,
|
||||
tiling,
|
||||
usage,
|
||||
0, /* TODO */
|
||||
&mut output);
|
||||
|
||||
match check_errors(r) {
|
||||
Ok(_) => (),
|
||||
Err(Error::FormatNotSupported) => return Err(ImageCreationError::FormatNotSupported),
|
||||
Err(Error::FormatNotSupported) =>
|
||||
return Err(ImageCreationError::FormatNotSupported),
|
||||
Err(err) => return Err(err.into()),
|
||||
}
|
||||
|
||||
if extent.width > output.maxExtent.width || extent.height > output.maxExtent.height ||
|
||||
extent.depth > output.maxExtent.depth || mipmaps > output.maxMipLevels ||
|
||||
array_layers > output.maxArrayLayers || (num_samples & output.sampleCounts) == 0
|
||||
extent.depth > output.maxExtent.depth ||
|
||||
mipmaps > output.maxMipLevels ||
|
||||
array_layers > output.maxArrayLayers ||
|
||||
(num_samples & output.sampleCounts) == 0
|
||||
{
|
||||
return Err(capabilities_error);
|
||||
}
|
||||
@ -421,8 +511,10 @@ impl UnsafeImage {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateImage(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateImage(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -453,8 +545,7 @@ impl UnsafeImage {
/// This function is for example used at the swapchain's initialization.
pub unsafe fn from_raw(device: Arc<Device>, handle: u64, usage: u32, format: Format,
dimensions: ImageDimensions, samples: u32, mipmaps: u32)
-> UnsafeImage
{
-> UnsafeImage {
let vk_i = device.instance().pointers();
let physical_device = device.physical_device().internal_object();

@ -476,23 +567,24 @@ impl UnsafeImage {
}
}

pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize)
-> Result<(), OomError>
{
pub unsafe fn bind_memory(&self, memory: &DeviceMemory, offset: usize) -> Result<(), OomError> {
let vk = self.device.pointers();

// We check for correctness in debug mode.
debug_assert!({
let mut mem_reqs = mem::uninitialized();
vk.GetImageMemoryRequirements(self.device.internal_object(), self.image,
vk.GetImageMemoryRequirements(self.device.internal_object(),
self.image,
&mut mem_reqs);
mem_reqs.size <= (memory.size() - offset) as u64 &&
(offset as u64 % mem_reqs.alignment) == 0 &&
mem_reqs.memoryTypeBits & (1 << memory.memory_type().id()) != 0
});

try!(check_errors(vk.BindImageMemory(self.device.internal_object(), self.image,
memory.internal_object(), offset as vk::DeviceSize)));
check_errors(vk.BindImageMemory(self.device.internal_object(),
self.image,
memory.internal_object(),
offset as vk::DeviceSize))?;
Ok(())
}

@ -599,7 +691,9 @@ impl UnsafeImage {
};

let mut out = mem::uninitialized();
vk.GetImageSubresourceLayout(self.device.internal_object(), self.image, &subresource,
vk.GetImageSubresourceLayout(self.device.internal_object(),
self.image,
&subresource,
&mut out);

LinearLayout {
@ -706,7 +800,10 @@ pub enum ImageCreationError {
/// Not enough memory.
OomError(OomError),
/// A wrong number of mipmaps was provided.
InvalidMipmapsCount { obtained: u32, valid_range: Range<u32> },
InvalidMipmapsCount {
obtained: u32,
valid_range: Range<u32>,
},
/// The requeted number of samples is not supported, or is 0.
UnsupportedSamplesCount { obtained: u32 },
/// The dimensions are too large, or one of the dimensions is 0.
@ -724,16 +821,17 @@ impl error::Error for ImageCreationError {
fn description(&self) -> &str {
match *self {
ImageCreationError::OomError(_) => "not enough memory available",
ImageCreationError::InvalidMipmapsCount { .. } => "a wrong number of mipmaps was \
provided",
ImageCreationError::UnsupportedSamplesCount { .. } => "the requeted number of samples \
is not supported, or is 0",
ImageCreationError::UnsupportedDimensions { .. } => "the dimensions are too large, or \
one of the dimensions is 0",
ImageCreationError::FormatNotSupported => "the requested format is not supported by \
the Vulkan implementation",
ImageCreationError::UnsupportedUsage => "the format is supported, but at least one \
of the requested usages is not supported",
ImageCreationError::InvalidMipmapsCount { .. } =>
"a wrong number of mipmaps was provided",
ImageCreationError::UnsupportedSamplesCount { .. } =>
"the requeted number of samples is not supported, or is 0",
ImageCreationError::UnsupportedDimensions { .. } =>
"the dimensions are too large, or one of the dimensions is 0",
ImageCreationError::FormatNotSupported =>
"the requested format is not supported by the Vulkan implementation",
ImageCreationError::UnsupportedUsage =>
"the format is supported, but at least one of the requested usages is not \
supported",
ImageCreationError::ShaderStorageImageMultisampleFeatureNotEnabled => {
"the `shader_storage_image_multisample` feature must be enabled to create such \
an image"
@ -745,7 +843,7 @@ impl error::Error for ImageCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
ImageCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -770,7 +868,7 @@ impl From<Error> for ImageCreationError {
match err {
err @ Error::OutOfHostMemory => ImageCreationError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => ImageCreationError::OomError(OomError::from(err)),
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -810,8 +908,8 @@ pub struct UnsafeImageView {
impl UnsafeImageView {
/// See the docs of new().
pub unsafe fn raw(image: &UnsafeImage, ty: ViewType, mipmap_levels: Range<u32>,
array_layers: Range<u32>) -> Result<UnsafeImageView, OomError>
{
array_layers: Range<u32>)
-> Result<UnsafeImageView, OomError> {
let vk = image.device.pointers();

assert!(mipmap_levels.end > mipmap_levels.start);
@ -830,19 +928,23 @@ impl UnsafeImageView {

let view_type = match (image.dimensions(), ty, array_layers.end - array_layers.start) {
(ImageDimensions::Dim1d { .. }, ViewType::Dim1d, 1) => vk::IMAGE_VIEW_TYPE_1D,
(ImageDimensions::Dim1d { .. }, ViewType::Dim1dArray, _) => vk::IMAGE_VIEW_TYPE_1D_ARRAY,
(ImageDimensions::Dim1d { .. }, ViewType::Dim1dArray, _) =>
vk::IMAGE_VIEW_TYPE_1D_ARRAY,
(ImageDimensions::Dim2d { .. }, ViewType::Dim2d, 1) => vk::IMAGE_VIEW_TYPE_2D,
(ImageDimensions::Dim2d { .. }, ViewType::Dim2dArray, _) => vk::IMAGE_VIEW_TYPE_2D_ARRAY,
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::Cubemap, n) if cubemap_compatible => {
(ImageDimensions::Dim2d { .. }, ViewType::Dim2dArray, _) =>
vk::IMAGE_VIEW_TYPE_2D_ARRAY,
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::Cubemap, n)
if cubemap_compatible => {
assert_eq!(n, 6);
vk::IMAGE_VIEW_TYPE_CUBE
},
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::CubemapArray, n) if cubemap_compatible => {
(ImageDimensions::Dim2d { cubemap_compatible, .. }, ViewType::CubemapArray, n)
if cubemap_compatible => {
assert_eq!(n % 6, 0);
vk::IMAGE_VIEW_TYPE_CUBE_ARRAY
},
(ImageDimensions::Dim3d { .. }, ViewType::Dim3d, _) => vk::IMAGE_VIEW_TYPE_3D,
_ => panic!()
_ => panic!(),
};

let view = {
@ -853,7 +955,12 @@ impl UnsafeImageView {
image: image.internal_object(),
viewType: view_type,
format: image.format as u32,
components: vk::ComponentMapping { r: 0, g: 0, b: 0, a: 0 }, // FIXME:
components: vk::ComponentMapping {
r: 0,
g: 0,
b: 0,
a: 0,
}, // FIXME:
subresourceRange: vk::ImageSubresourceRange {
aspectMask: aspect_mask,
baseMipLevel: mipmap_levels.start,
@ -864,8 +971,10 @@ impl UnsafeImageView {
};

let mut output = mem::uninitialized();
try!(check_errors(vk.CreateImageView(image.device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateImageView(image.device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -895,8 +1004,8 @@ impl UnsafeImageView {
///
#[inline]
pub unsafe fn new(image: &UnsafeImage, ty: ViewType, mipmap_levels: Range<u32>,
array_layers: Range<u32>) -> UnsafeImageView
{
array_layers: Range<u32>)
-> UnsafeImageView {
UnsafeImageView::raw(image, ty, mipmap_levels, array_layers).unwrap()
}

@ -978,11 +1087,11 @@ mod tests {
use std::u32;

use super::ImageCreationError;
use super::UnsafeImage;
use super::ImageUsage;
use super::UnsafeImage;

use image::ImageDimensions;
use format::Format;
use image::ImageDimensions;
use sync::Sharing;

#[test]
@ -995,10 +1104,20 @@ mod tests {
};

let (_img, _) = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
}.unwrap();
}

@ -1013,10 +1132,20 @@ mod tests {
};

let (_img, _) = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
}.unwrap();
}

@ -1030,15 +1159,25 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 0, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
0,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::UnsupportedSamplesCount { .. }) => (),
_ => panic!()
_ => panic!(),
};
}

@ -1052,15 +1191,25 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 5, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
5,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::UnsupportedSamplesCount { .. }) => (),
_ => panic!()
_ => panic!(),
};
}

@ -1074,15 +1223,25 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 0,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
0,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::InvalidMipmapsCount { .. }) => (),
_ => panic!()
_ => panic!(),
};
}

@ -1097,18 +1256,31 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, u32::MAX,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
u32::MAX,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::InvalidMipmapsCount { obtained, valid_range }) => {
Err(ImageCreationError::InvalidMipmapsCount {
obtained,
valid_range,
}) => {
assert_eq!(obtained, u32::MAX);
assert_eq!(valid_range.start, 1);
},
_ => panic!()
_ => panic!(),
};
}

@ -1122,16 +1294,26 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 2, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
2,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::ShaderStorageImageMultisampleFeatureNotEnabled) => (),
Err(ImageCreationError::UnsupportedSamplesCount { .. }) => (), // unlikely but possible
_ => panic!()
_ => panic!(),
};
}

@ -1145,16 +1327,26 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::ASTC_5x4UnormBlock,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, u32::MAX,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::ASTC_5x4UnormBlock,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
u32::MAX,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::FormatNotSupported) => (),
Err(ImageCreationError::UnsupportedUsage) => (),
_ => panic!()
_ => panic!(),
};
}

@ -1169,15 +1361,25 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 32, array_layers: 1,
cubemap_compatible: false }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 32,
array_layers: 1,
cubemap_compatible: false,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::UnsupportedUsage) => (),
_ => panic!()
_ => panic!(),
};
}

@ -1191,15 +1393,25 @@ mod tests {
};

let res = unsafe {
UnsafeImage::new(device, usage, Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d { width: 32, height: 64, array_layers: 1,
cubemap_compatible: true }, 1, 1,
Sharing::Exclusive::<Empty<_>>, false, false)
UnsafeImage::new(device,
usage,
Format::R8G8B8A8Unorm,
ImageDimensions::Dim2d {
width: 32,
height: 64,
array_layers: 1,
cubemap_compatible: true,
},
1,
1,
Sharing::Exclusive::<Empty<_>>,
false,
false)
};

match res {
Err(ImageCreationError::UnsupportedDimensions { .. }) => (),
_ => panic!()
_ => panic!(),
};
}
}

@ -11,12 +11,12 @@ use buffer::BufferAccess;
use device::Queue;
use format::ClearValue;
use format::Format;
use format::PossibleFloatFormatDesc;
use format::PossibleUintFormatDesc;
use format::PossibleSintFormatDesc;
use format::PossibleDepthFormatDesc;
use format::PossibleStencilFormatDesc;
use format::PossibleDepthStencilFormatDesc;
use format::PossibleFloatFormatDesc;
use format::PossibleSintFormatDesc;
use format::PossibleStencilFormatDesc;
use format::PossibleUintFormatDesc;
use image::Dimensions;
use image::ImageDimensions;
use image::ImageLayout;
@ -117,10 +117,10 @@ pub unsafe trait ImageAccess {
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
fn conflicts_buffer(&self, self_first_layer: u32, self_num_layers: u32, self_first_mipmap: u32,
self_num_mipmaps: u32, other: &BufferAccess, other_offset: usize,
other_size: usize) -> bool
{
fn conflicts_buffer(&self, self_first_layer: u32, self_num_layers: u32,
self_first_mipmap: u32, self_num_mipmaps: u32, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
// TODO: should we really provide a default implementation?
false
}
@ -132,11 +132,11 @@ pub unsafe trait ImageAccess {
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
fn conflicts_image(&self, self_first_layer: u32, self_num_layers: u32, self_first_mipmap: u32,
self_num_mipmaps: u32, other: &ImageAccess,
fn conflicts_image(&self, self_first_layer: u32, self_num_layers: u32,
self_first_mipmap: u32, self_num_mipmaps: u32, other: &ImageAccess,
other_first_layer: u32, other_num_layers: u32, other_first_mipmap: u32,
other_num_mipmaps: u32) -> bool
{
other_num_mipmaps: u32)
-> bool {
// TODO: should we really provide a default implementation?

// TODO: debug asserts to check for ranges
@ -166,15 +166,27 @@ pub unsafe trait ImageAccess {
/// Shortcut for `conflicts_buffer` that compares the whole buffer to another.
#[inline]
fn conflicts_buffer_all(&self, other: &BufferAccess) -> bool {
self.conflicts_buffer(0, self.dimensions().array_layers(), 0, self.mipmap_levels(),
other, 0, other.size())
self.conflicts_buffer(0,
self.dimensions().array_layers(),
0,
self.mipmap_levels(),
other,
0,
other.size())
}

/// Shortcut for `conflicts_image` that compares the whole buffer to a whole image.
#[inline]
fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
self.conflicts_image(0, self.dimensions().array_layers(), 0, self.mipmap_levels(),
other, 0, other.dimensions().array_layers(), 0, other.mipmap_levels())
self.conflicts_image(0,
self.dimensions().array_layers(),
0,
self.mipmap_levels(),
other,
0,
other.dimensions().array_layers(),
0,
other.mipmap_levels())
}

/// Shortcut for `conflict_key` that grabs the key of the whole buffer.
@ -215,7 +227,10 @@ pub unsafe trait ImageAccess {
unsafe fn unlock(&self);
}

unsafe impl<T> ImageAccess for T where T: SafeDeref, T::Target: ImageAccess {
unsafe impl<T> ImageAccess for T
where T: SafeDeref,
T::Target: ImageAccess
{
#[inline]
fn inner(&self) -> &UnsafeImage {
(**self).inner()
@ -233,8 +248,7 @@ unsafe impl<T> ImageAccess for T where T: SafeDeref, T::Target: ImageAccess {

#[inline]
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64
{
-> u64 {
(**self).conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
}

@ -286,9 +300,9 @@ unsafe impl<I> ImageAccess for ImageAccessFromUndefinedLayout<I>

#[inline]
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64
{
self.image.conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
-> u64 {
self.image
.conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
}

#[inline]
@ -360,12 +374,17 @@ pub unsafe trait ImageViewAccess {
/// This method should check whether the sampler's configuration can be used with the format
/// of the view.
// TODO: return a Result
fn can_be_sampled(&self, sampler: &Sampler) -> bool { true /* FIXME */ }
fn can_be_sampled(&self, sampler: &Sampler) -> bool {
true /* FIXME */
}

//fn usable_as_render_pass_attachment(&self, ???) -> Result<(), ???>;
}

unsafe impl<T> ImageViewAccess for T where T: SafeDeref, T::Target: ImageViewAccess {
unsafe impl<T> ImageViewAccess for T
where T: SafeDeref,
T::Target: ImageViewAccess
{
#[inline]
fn parent(&self) -> &ImageAccess {
(**self).parent()

@ -98,14 +98,30 @@ impl ImageUsage {
#[inline]
pub fn to_usage_bits(&self) -> vk::ImageUsageFlagBits {
let mut result = 0;
if self.transfer_source { result |= vk::IMAGE_USAGE_TRANSFER_SRC_BIT; }
if self.transfer_dest { result |= vk::IMAGE_USAGE_TRANSFER_DST_BIT; }
if self.sampled { result |= vk::IMAGE_USAGE_SAMPLED_BIT; }
if self.storage { result |= vk::IMAGE_USAGE_STORAGE_BIT; }
if self.color_attachment { result |= vk::IMAGE_USAGE_COLOR_ATTACHMENT_BIT; }
if self.depth_stencil_attachment { result |= vk::IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; }
if self.transient_attachment { result |= vk::IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; }
if self.input_attachment { result |= vk::IMAGE_USAGE_INPUT_ATTACHMENT_BIT; }
if self.transfer_source {
result |= vk::IMAGE_USAGE_TRANSFER_SRC_BIT;
}
if self.transfer_dest {
result |= vk::IMAGE_USAGE_TRANSFER_DST_BIT;
}
if self.sampled {
result |= vk::IMAGE_USAGE_SAMPLED_BIT;
}
if self.storage {
result |= vk::IMAGE_USAGE_STORAGE_BIT;
}
if self.color_attachment {
result |= vk::IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
if self.depth_stencil_attachment {
result |= vk::IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
if self.transient_attachment {
result |= vk::IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
}
if self.input_attachment {
result |= vk::IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
}
result
}

@ -41,16 +41,16 @@ use std::error;
use std::ffi::CStr;
use std::fmt;
use std::mem;
use std::os::raw::{c_void, c_char};
use std::os::raw::{c_char, c_void};
use std::panic;
use std::ptr;
use std::sync::Arc;

use instance::Instance;

use check_errors;
use Error;
use VulkanObject;
use check_errors;
use vk;

/// Registration of a callback called by validation layers.
@ -80,27 +80,27 @@ impl DebugCallback {
// that can't be casted to a `*const c_void`.
let user_callback = Box::new(Box::new(user_callback) as Box<_>);

extern "system" fn callback(ty: vk::DebugReportFlagsEXT,
_: vk::DebugReportObjectTypeEXT, _: u64, _: usize,
_: i32, layer_prefix: *const c_char,
description: *const c_char, user_data: *mut c_void) -> u32
{
extern "system" fn callback(ty: vk::DebugReportFlagsEXT, _: vk::DebugReportObjectTypeEXT,
_: u64, _: usize, _: i32, layer_prefix: *const c_char,
description: *const c_char, user_data: *mut c_void)
-> u32 {
unsafe {
let user_callback = user_data as *mut Box<Fn()> as *const _;
let user_callback: &Box<Fn(&Message)> = &*user_callback;

let layer_prefix = CStr::from_ptr(layer_prefix).to_str()
.expect("debug callback message \
not utf-8");
let description = CStr::from_ptr(description).to_str()
.expect("debug callback message \
not utf-8");
let layer_prefix = CStr::from_ptr(layer_prefix)
.to_str()
.expect("debug callback message not utf-8");
let description = CStr::from_ptr(description)
.to_str()
.expect("debug callback message not utf-8");

let message = Message {
ty: MessageTypes {
information: (ty & vk::DEBUG_REPORT_INFORMATION_BIT_EXT) != 0,
warning: (ty & vk::DEBUG_REPORT_WARNING_BIT_EXT) != 0,
performance_warning: (ty & vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) != 0,
performance_warning: (ty & vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) !=
0,
error: (ty & vk::DEBUG_REPORT_ERROR_BIT_EXT) != 0,
debug: (ty & vk::DEBUG_REPORT_DEBUG_BIT_EXT) != 0,
},
@ -120,11 +120,21 @@ impl DebugCallback {

let flags = {
let mut flags = 0;
if messages.information { flags |= vk::DEBUG_REPORT_INFORMATION_BIT_EXT; }
if messages.warning { flags |= vk::DEBUG_REPORT_WARNING_BIT_EXT; }
if messages.performance_warning { flags |= vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; }
if messages.error { flags |= vk::DEBUG_REPORT_ERROR_BIT_EXT; }
if messages.debug { flags |= vk::DEBUG_REPORT_DEBUG_BIT_EXT; }
if messages.information {
flags |= vk::DEBUG_REPORT_INFORMATION_BIT_EXT;
}
if messages.warning {
flags |= vk::DEBUG_REPORT_WARNING_BIT_EXT;
}
if messages.performance_warning {
flags |= vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
}
if messages.error {
flags |= vk::DEBUG_REPORT_ERROR_BIT_EXT;
}
if messages.debug {
flags |= vk::DEBUG_REPORT_DEBUG_BIT_EXT;
}
flags
};

@ -140,8 +150,10 @@ impl DebugCallback {

let debug_report_callback = unsafe {
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateDebugReportCallbackEXT(instance.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateDebugReportCallbackEXT(instance.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -170,7 +182,8 @@ impl Drop for DebugCallback {
unsafe {
let vk = self.instance.pointers();
vk.DestroyDebugReportCallbackEXT(self.instance.internal_object(),
self.debug_report_callback, ptr::null());
self.debug_report_callback,
ptr::null());
}
}
}
@ -246,8 +259,8 @@ impl error::Error for DebugCallbackCreationError {
#[inline]
fn description(&self) -> &str {
match *self {
DebugCallbackCreationError::MissingExtension => "the `EXT_debug_report` extension was \
not enabled",
DebugCallbackCreationError::MissingExtension =>
"the `EXT_debug_report` extension was not enabled",
}
}
}

@ -7,21 +7,21 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use std::collections::HashSet;
use std::error;
use std::ffi::{CString, CStr};
use std::ffi::{CStr, CString};
use std::fmt;
use std::ptr;
use std::str;
use std::collections::HashSet;

use Error;
use OomError;
use VulkanObject;
use check_errors;
use instance::PhysicalDevice;
use instance::loader;
use instance::loader::LoadingError;
use vk;
use check_errors;

macro_rules! extensions {
($sname:ident, $rawname:ident, $($ext:ident => $s:expr,)*) => (
@ -402,7 +402,7 @@ impl From<Error> for SupportedExtensionsError {
err @ Error::OutOfDeviceMemory => {
SupportedExtensionsError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -415,8 +415,8 @@ pub struct Unbuildable(());

#[cfg(test)]
mod tests {
use instance::{InstanceExtensions, RawInstanceExtensions};
use instance::{DeviceExtensions, RawDeviceExtensions};
use instance::{InstanceExtensions, RawInstanceExtensions};

#[test]
fn empty_extensions() {

@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use smallvec::SmallVec;
use std::borrow::Cow;
use std::error;
use std::ffi::CStr;
@ -16,19 +17,18 @@ use std::mem;
use std::ptr;
use std::slice;
use std::sync::Arc;
use smallvec::SmallVec;

use instance::loader;
use instance::loader::LoadingError;
use check_errors;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use instance::loader;
use instance::loader::LoadingError;
use vk;

use features::Features;
use version::Version;
use instance::{InstanceExtensions, RawInstanceExtensions};
use version::Version;

/// An instance of a Vulkan context. This is the main object that should be created by an
/// application before everything else.
@ -114,36 +114,41 @@ impl Instance {
// TODO: add a test for these ^
// TODO: if no allocator is specified by the user, use Rust's allocator instead of leaving
// the choice to Vulkan
pub fn new<'a, L, Ext>(app_infos: Option<&ApplicationInfo>, extensions: Ext,
layers: L) -> Result<Arc<Instance>, InstanceCreationError>
pub fn new<'a, L, Ext>(app_infos: Option<&ApplicationInfo>, extensions: Ext, layers: L)
-> Result<Arc<Instance>, InstanceCreationError>
where L: IntoIterator<Item = &'a &'a str>,
Ext: Into<RawInstanceExtensions>,
Ext: Into<RawInstanceExtensions>
{
let layers = layers.into_iter().map(|&layer| {
CString::new(layer).unwrap()
}).collect::<SmallVec<[_; 16]>>();
let layers = layers
.into_iter()
.map(|&layer| CString::new(layer).unwrap())
.collect::<SmallVec<[_; 16]>>();

Instance::new_inner(app_infos, extensions.into(), layers)
}

fn new_inner(app_infos: Option<&ApplicationInfo>, extensions: RawInstanceExtensions,
layers: SmallVec<[CString; 16]>) -> Result<Arc<Instance>, InstanceCreationError>
{
layers: SmallVec<[CString; 16]>)
-> Result<Arc<Instance>, InstanceCreationError> {
// TODO: For now there are still buggy drivers that will segfault if you don't pass any
// appinfos. Therefore for now we ensure that it can't be `None`.
let def = Default::default();
let app_infos = match app_infos {
Some(a) => Some(a),
None => Some(&def)
None => Some(&def),
};

// Building the CStrings from the `str`s within `app_infos`.
// They need to be created ahead of time, since we pass pointers to them.
let app_infos_strings = if let Some(app_infos) = app_infos {
Some((
app_infos.application_name.clone().map(|n| CString::new(n.as_bytes().to_owned()).unwrap()),
app_infos.engine_name.clone().map(|n| CString::new(n.as_bytes().to_owned()).unwrap())
))
Some((app_infos
.application_name
.clone()
.map(|n| CString::new(n.as_bytes().to_owned()).unwrap()),
app_infos
.engine_name
.clone()
.map(|n| CString::new(n.as_bytes().to_owned()).unwrap())))
} else {
None
};
@ -153,11 +158,33 @@ impl Instance {
Some(vk::ApplicationInfo {
sType: vk::STRUCTURE_TYPE_APPLICATION_INFO,
pNext: ptr::null(),
pApplicationName: app_infos_strings.as_ref().unwrap().0.as_ref().map(|s| s.as_ptr()).unwrap_or(ptr::null()),
applicationVersion: app_infos.application_version.map(|v| v.into_vulkan_version()).unwrap_or(0),
pEngineName: app_infos_strings.as_ref().unwrap().1.as_ref().map(|s| s.as_ptr()).unwrap_or(ptr::null()),
engineVersion: app_infos.engine_version.map(|v| v.into_vulkan_version()).unwrap_or(0),
apiVersion: Version { major: 1, minor: 0, patch: 0 }.into_vulkan_version(), // TODO:
pApplicationName: app_infos_strings
.as_ref()
.unwrap()
.0
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
applicationVersion: app_infos
.application_version
.map(|v| v.into_vulkan_version())
.unwrap_or(0),
pEngineName: app_infos_strings
.as_ref()
.unwrap()
.1
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
engineVersion: app_infos
.engine_version
.map(|v| v.into_vulkan_version())
.unwrap_or(0),
apiVersion: Version {
major: 1,
minor: 0,
patch: 0,
}.into_vulkan_version(), // TODO:
})

} else {
@ -165,15 +192,17 @@ impl Instance {
};

// FIXME: check whether each layer is supported
let layers_ptr = layers.iter().map(|layer| {
layer.as_ptr()
}).collect::<SmallVec<[_; 16]>>();
let layers_ptr = layers
.iter()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 16]>>();

let extensions_list = extensions.iter().map(|extension| {
extension.as_ptr()
}).collect::<SmallVec<[_; 32]>>();
let extensions_list = extensions
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 32]>>();

let entry_points = try!(loader::entry_points());
let entry_points = loader::entry_points()?;

// Creating the Vulkan instance.
let instance = unsafe {
@ -193,7 +222,7 @@ impl Instance {
ppEnabledExtensionNames: extensions_list.as_ptr(),
};

try!(check_errors(entry_points.CreateInstance(&infos, ptr::null(), &mut output)));
check_errors(entry_points.CreateInstance(&infos, ptr::null(), &mut output))?;
output
};

@ -201,18 +230,18 @@ impl Instance {
let vk = {
let f = loader::static_functions().unwrap(); // TODO: return proper error
vk::InstancePointers::load(|name| unsafe {
mem::transmute(f.GetInstanceProcAddr(instance, name.as_ptr()))
mem::transmute(f.GetInstanceProcAddr(instance,
name.as_ptr()))
})
};

// Enumerating all physical devices.
let physical_devices: Vec<vk::PhysicalDevice> = unsafe {
let mut num = 0;
try!(check_errors(vk.EnumeratePhysicalDevices(instance, &mut num, ptr::null_mut())));
check_errors(vk.EnumeratePhysicalDevices(instance, &mut num, ptr::null_mut()))?;

let mut devices = Vec::with_capacity(num as usize);
try!(check_errors(vk.EnumeratePhysicalDevices(instance, &mut num,
devices.as_mut_ptr())));
check_errors(vk.EnumeratePhysicalDevices(instance, &mut num, devices.as_mut_ptr()))?;
devices.set_len(num as usize);
devices
};
@ -255,8 +284,7 @@ impl Instance {
vk.GetPhysicalDeviceQueueFamilyProperties(device, &mut num, ptr::null_mut());

let mut families = Vec::with_capacity(num as usize);
vk.GetPhysicalDeviceQueueFamilyProperties(device, &mut num,
families.as_mut_ptr());
vk.GetPhysicalDeviceQueueFamilyProperties(device, &mut num, families.as_mut_ptr());
families.set_len(num as usize);
families
};
@ -286,8 +314,10 @@ impl Instance {

/// Initialize all physical devices, but use VK_KHR_get_physical_device_properties2
/// TODO: Query extension-specific physical device properties, once a new instance extension is supported.
fn init_physical_devices2(vk: &vk::InstancePointers, physical_devices: Vec<vk::PhysicalDevice>,
extensions: &InstanceExtensions) -> Vec<PhysicalDeviceInfos> {
fn init_physical_devices2(vk: &vk::InstancePointers,
physical_devices: Vec<vk::PhysicalDevice>,
extensions: &InstanceExtensions)
-> Vec<PhysicalDeviceInfos> {
let mut output = Vec::with_capacity(physical_devices.len());

for device in physical_devices.into_iter() {
@ -306,17 +336,23 @@ impl Instance {
let mut num = 0;
vk.GetPhysicalDeviceQueueFamilyProperties2KHR(device, &mut num, ptr::null_mut());

let mut families = (0 .. num).map(|_| {
let mut families = (0 .. num)
.map(|_| {
vk::QueueFamilyProperties2KHR {
sType: vk::STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR,
pNext: ptr::null_mut(),
queueFamilyProperties: mem::uninitialized(),
}
}).collect::<Vec<_>>();
})
.collect::<Vec<_>>();

vk.GetPhysicalDeviceQueueFamilyProperties2KHR(device, &mut num,
vk.GetPhysicalDeviceQueueFamilyProperties2KHR(device,
&mut num,
families.as_mut_ptr());
families.into_iter().map(|family| family.queueFamilyProperties).collect()
families
.into_iter()
.map(|family| family.queueFamilyProperties)
.collect()
};

let memory: vk::PhysicalDeviceMemoryProperties = unsafe {
@ -498,7 +534,7 @@ impl error::Error for InstanceCreationError {
match *self {
InstanceCreationError::LoadingError(ref err) => Some(err),
InstanceCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -534,7 +570,7 @@ impl From<Error> for InstanceCreationError {
Error::LayerNotPresent => InstanceCreationError::LayerNotPresent,
Error::ExtensionNotPresent => InstanceCreationError::ExtensionNotPresent,
Error::IncompatibleDriver => InstanceCreationError::IncompatibleDriver,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -651,11 +687,14 @@ impl<'a> PhysicalDevice<'a> {

/// Returns the human-readable name of the device.
#[inline]
pub fn name(&self) -> String { // FIXME: for some reason this panics if you use a `&str`
pub fn name(&self) -> String {
// FIXME: for some reason this panics if you use a `&str`
unsafe {
let val = self.infos().properties.deviceName;
let val = CStr::from_ptr(val.as_ptr());
val.to_str().expect("physical device name contained non-UTF8 characters").to_owned()
val.to_str()
.expect("physical device name contained non-UTF8 characters")
.to_owned()
}
}

@ -676,13 +715,15 @@ impl<'a> PhysicalDevice<'a> {
/// ```
#[inline]
pub fn ty(&self) -> PhysicalDeviceType {
match self.instance.physical_devices[self.device].properties.deviceType {
match self.instance.physical_devices[self.device]
.properties
.deviceType {
vk::PHYSICAL_DEVICE_TYPE_OTHER => PhysicalDeviceType::Other,
vk::PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU => PhysicalDeviceType::IntegratedGpu,
vk::PHYSICAL_DEVICE_TYPE_DISCRETE_GPU => PhysicalDeviceType::DiscreteGpu,
vk::PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU => PhysicalDeviceType::VirtualGpu,
vk::PHYSICAL_DEVICE_TYPE_CPU => PhysicalDeviceType::Cpu,
_ => panic!("Unrecognized Vulkan device type")
_ => panic!("Unrecognized Vulkan device type"),
}
}

@ -803,7 +844,8 @@ impl<'a> PhysicalDevice<'a> {
/// Can be stored in a configuration file, so that you can retrieve the device again the next
/// time the program is run.
#[inline]
pub fn uuid(&self) -> &[u8; 16] { // must be equal to vk::UUID_SIZE
pub fn uuid(&self) -> &[u8; 16] {
// must be equal to vk::UUID_SIZE
&self.infos().properties.pipelineCacheUUID
}

@ -925,8 +967,7 @@ impl<'a> QueueFamily<'a> {
/// > operations are ever added to Vulkan.
#[inline]
pub fn supports_transfers(&self) -> bool {
(self.flags() & vk::QUEUE_TRANSFER_BIT) != 0 ||
self.supports_graphics() ||
(self.flags() & vk::QUEUE_TRANSFER_BIT) != 0 || self.supports_graphics() ||
self.supports_compute()
}

@ -976,7 +1017,8 @@ impl<'a> Iterator for QueueFamiliesIter<'a> {
}
}

impl<'a> ExactSizeIterator for QueueFamiliesIter<'a> {}
impl<'a> ExactSizeIterator for QueueFamiliesIter<'a> {
}

/// Represents a memory type in a physical device.
#[derive(Debug, Copy, Clone)]
@ -1002,7 +1044,10 @@ impl<'a> MemoryType<'a> {
#[inline]
pub fn heap(&self) -> MemoryHeap<'a> {
let heap_id = self.physical_device.infos().memory.memoryTypes[self.id as usize].heapIndex;
MemoryHeap { physical_device: self.physical_device, id: heap_id }
MemoryHeap {
physical_device: self.physical_device,
id: heap_id,
}
}

/// Returns true if the memory type is located on the device, which means that it's the most
@ -1087,7 +1132,8 @@ impl<'a> Iterator for MemoryTypesIter<'a> {
}
}

impl<'a> ExactSizeIterator for MemoryTypesIter<'a> {}
impl<'a> ExactSizeIterator for MemoryTypesIter<'a> {
}

/// Represents a memory heap in a physical device.
#[derive(Debug, Copy, Clone)]
@ -1156,7 +1202,8 @@ impl<'a> Iterator for MemoryHeapsIter<'a> {
}
}

impl<'a> ExactSizeIterator for MemoryHeapsIter<'a> {}
impl<'a> ExactSizeIterator for MemoryHeapsIter<'a> {
}

/// Limits of a physical device.
pub struct Limits<'a> {
@ -1300,12 +1347,12 @@ mod tests {

let phys = match instance::PhysicalDevice::enumerate(&instance).next() {
Some(p) => p,
None => return
None => return,
};

let queue_family = match phys.queue_families().next() {
Some(q) => q,
None => return
None => return,
};

let by_id = phys.queue_family_by_id(queue_family.id()).unwrap();

@ -8,18 +8,18 @@
// according to those terms.

use std::error;
use std::fmt;
use std::ffi::CStr;
use std::fmt;
use std::ptr;
use std::vec::IntoIter;

use check_errors;
use OomError;
use Error;
use vk;
use OomError;
use check_errors;
use instance::loader;
use instance::loader::LoadingError;
use version::Version;
use vk;

/// Queries the list of layers that are available when creating an instance.
///
@ -46,22 +46,21 @@ use version::Version;
/// ```
pub fn layers_list() -> Result<LayersIterator, LayersListError> {
unsafe {
let entry_points = try!(loader::entry_points());
let entry_points = loader::entry_points()?;

let mut num = 0;
try!(check_errors({
check_errors({
entry_points.EnumerateInstanceLayerProperties(&mut num, ptr::null_mut())
}));
})?;

let mut layers: Vec<vk::LayerProperties> = Vec::with_capacity(num as usize);
try!(check_errors({
entry_points.EnumerateInstanceLayerProperties(&mut num, layers.as_mut_ptr())
}));
check_errors({
entry_points
.EnumerateInstanceLayerProperties(&mut num, layers.as_mut_ptr())
})?;
layers.set_len(num as usize);

Ok(LayersIterator {
iter: layers.into_iter()
})
Ok(LayersIterator { iter: layers.into_iter() })
}
}

@ -87,7 +86,11 @@ impl LayerProperties {
/// ```
#[inline]
pub fn name(&self) -> &str {
unsafe { CStr::from_ptr(self.props.layerName.as_ptr()).to_str().unwrap() }
unsafe {
CStr::from_ptr(self.props.layerName.as_ptr())
.to_str()
.unwrap()
}
}

/// Returns a description of the layer.
@ -105,7 +108,11 @@ impl LayerProperties {
/// ```
#[inline]
pub fn description(&self) -> &str {
unsafe { CStr::from_ptr(self.props.description.as_ptr()).to_str().unwrap() }
unsafe {
CStr::from_ptr(self.props.description.as_ptr())
.to_str()
.unwrap()
}
}

/// Returns the version of Vulkan supported by this layer.
@ -200,7 +207,7 @@ impl From<Error> for LayersListError {
match err {
err @ Error::OutOfHostMemory => LayersListError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => LayersListError::OomError(OomError::from(err)),
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -208,7 +215,7 @@ impl From<Error> for LayersListError {
/// Iterator that produces the list of layers that are available.
// TODO: #[derive(Debug, Clone)]
pub struct LayersIterator {
iter: IntoIter<vk::LayerProperties>
iter: IntoIter<vk::LayerProperties>,
}

impl Iterator for LayersIterator {
@ -236,7 +243,7 @@ mod tests {
fn layers_list() {
let mut list = match instance::layers_list() {
Ok(l) => l,
Err(_) => return
Err(_) => return,
};

while let Some(_) = list.next() {}

@ -20,22 +20,17 @@ use vk;
fn load_static() -> Result<vk::Static, LoadingError> {
use std::os::raw::c_char;

extern {
extern "C" {
fn vkGetInstanceProcAddr(instance: vk::Instance, pName: *const c_char)
-> vk::PFN_vkVoidFunction;
}

extern "system" fn wrapper(instance: vk::Instance, pName: *const c_char)
-> vk::PFN_vkVoidFunction
{
unsafe {
vkGetInstanceProcAddr(instance, pName)
}
-> vk::PFN_vkVoidFunction {
unsafe { vkGetInstanceProcAddr(instance, pName) }
}

Ok(vk::Static {
GetInstanceProcAddr: wrapper,
})
Ok(vk::Static { GetInstanceProcAddr: wrapper })
}

#[cfg(not(any(target_os = "macos", target_os = "ios")))]
@ -58,10 +53,11 @@ fn load_static() -> Result<vk::Static, LoadingError> {
let name = name.to_str().unwrap();
match lib.symbol(name) {
Ok(s) => s,
Err(_) => { // TODO: return error?
Err(_) => {
// TODO: return error?
err = Some(LoadingError::MissingEntryPoint(name.to_owned()));
ptr::null()
}
},
}
});

@ -104,29 +104,30 @@
//! Once you have chosen a physical device, you can create a `Device` object from it. See the
//! `device` module for more info.
//!
pub use features::Features;

pub use self::extensions::DeviceExtensions;
pub use self::extensions::InstanceExtensions;
pub use self::extensions::RawDeviceExtensions;
pub use self::extensions::RawInstanceExtensions;
pub use self::instance::ApplicationInfo;
pub use self::instance::Instance;
pub use self::instance::InstanceCreationError;
pub use self::instance::ApplicationInfo;
pub use self::instance::Limits;
pub use self::instance::MemoryHeap;
pub use self::instance::MemoryHeapsIter;
pub use self::instance::MemoryType;
pub use self::instance::MemoryTypesIter;
pub use self::instance::PhysicalDevice;
pub use self::instance::PhysicalDevicesIter;
pub use self::instance::PhysicalDeviceType;
pub use self::instance::PhysicalDevicesIter;
pub use self::instance::QueueFamiliesIter;
pub use self::instance::QueueFamily;
pub use self::instance::MemoryTypesIter;
pub use self::instance::MemoryType;
pub use self::instance::MemoryHeapsIter;
pub use self::instance::MemoryHeap;
pub use self::instance::Limits;
pub use self::layers::layers_list;
pub use self::layers::LayerProperties;
pub use self::layers::LayersIterator;
pub use self::layers::LayersListError;
pub use self::layers::layers_list;
pub use self::loader::LoadingError;
pub use features::Features;
pub use version::Version;

pub mod debug;

@ -99,9 +99,12 @@ use std::sync::MutexGuard;

/// Alternative to the `Deref` trait. Contrary to `Deref`, must always return the same object.
pub unsafe trait SafeDeref: Deref {}
unsafe impl<'a, T: ?Sized> SafeDeref for &'a T {}
unsafe impl<T: ?Sized> SafeDeref for Arc<T> {}
unsafe impl<T: ?Sized> SafeDeref for Box<T> {}
unsafe impl<'a, T: ?Sized> SafeDeref for &'a T {
}
unsafe impl<T: ?Sized> SafeDeref for Arc<T> {
}
unsafe impl<T: ?Sized> SafeDeref for Box<T> {
}

/// Gives access to the internal identifier of an object.
pub unsafe trait VulkanObject {
@ -154,7 +157,7 @@ impl From<Error> for OomError {
match err {
Error::OutOfHostMemory => OomError::OutOfHostMemory,
Error::OutOfDeviceMemory => OomError::OutOfDeviceMemory,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -226,7 +229,8 @@ fn check_errors(result: vk::Result) -> Result<Success, Error> {
vk::ERROR_INCOMPATIBLE_DISPLAY_KHR => Err(Error::IncompatibleDisplay),
vk::ERROR_VALIDATION_FAILED_EXT => Err(Error::ValidationFailed),
vk::ERROR_OUT_OF_POOL_MEMORY_KHR => Err(Error::OutOfPoolMemory),
vk::ERROR_INVALID_SHADER_NV => panic!("Vulkan function returned VK_ERROR_INVALID_SHADER_NV"),
c => unreachable!("Unexpected error code returned by Vulkan: {}", c)
vk::ERROR_INVALID_SHADER_NV => panic!("Vulkan function returned \
VK_ERROR_INVALID_SHADER_NV"),
c => unreachable!("Unexpected error code returned by Vulkan: {}", c),
}
}

@ -9,20 +9,20 @@

use std::fmt;
use std::mem;
use std::ptr;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;

use instance::MemoryType;
use device::Device;
use device::DeviceOwned;
use memory::Content;
use OomError;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use instance::MemoryType;
use memory::Content;
use vk;

/// Represents memory that has been allocated.
@ -61,8 +61,7 @@ impl DeviceMemory {
// TODO: VK_ERROR_TOO_MANY_OBJECTS error
#[inline]
pub fn alloc(device: Arc<Device>, memory_type: MemoryType, size: usize)
-> Result<DeviceMemory, OomError>
{
-> Result<DeviceMemory, OomError> {
assert!(size >= 1);
assert_eq!(device.physical_device().internal_object(),
memory_type.physical_device().internal_object());
@ -85,8 +84,10 @@ impl DeviceMemory {
};

let mut output = mem::uninitialized();
try!(check_errors(vk.AllocateMemory(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.AllocateMemory(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -106,20 +107,22 @@ impl DeviceMemory {
/// - Panics if the memory type is not host-visible.
///
pub fn alloc_and_map(device: Arc<Device>, memory_type: MemoryType, size: usize)
-> Result<MappedDeviceMemory, OomError>
{
-> Result<MappedDeviceMemory, OomError> {
let vk = device.pointers();

assert!(memory_type.is_host_visible());
let mem = try!(DeviceMemory::alloc(device.clone(), memory_type, size));
let mem = DeviceMemory::alloc(device.clone(), memory_type, size)?;

let coherent = memory_type.is_host_coherent();

let ptr = unsafe {
let mut output = mem::uninitialized();
try!(check_errors(vk.MapMemory(device.internal_object(), mem.memory, 0,
mem.size as vk::DeviceSize, 0 /* reserved flags */,
&mut output)));
check_errors(vk.MapMemory(device.internal_object(),
mem.memory,
0,
mem.size as vk::DeviceSize,
0, /* reserved flags */
&mut output))?;
output
};

@ -133,7 +136,10 @@ impl DeviceMemory {
/// Returns the memory type this chunk was allocated on.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device.physical_device().memory_type_by_id(self.memory_type_index).unwrap()
self.device
.physical_device()
.memory_type_by_id(self.memory_type_index)
.unwrap()
}

/// Returns the size in bytes of that memory chunk.
@ -253,7 +259,8 @@ impl MappedDeviceMemory {
{
let vk = self.memory.device().pointers();
let pointer = T::ref_from_ptr((self.pointer as usize + range.start) as *mut _,
range.end - range.start).unwrap(); // TODO: error
range.end - range.start)
.unwrap(); // TODO: error

if !self.coherent {
let range = vk::MappedMemoryRange {
@ -298,8 +305,10 @@ unsafe impl DeviceOwned for MappedDeviceMemory {
}
}

unsafe impl Send for MappedDeviceMemory {}
unsafe impl Sync for MappedDeviceMemory {}
unsafe impl Send for MappedDeviceMemory {
}
unsafe impl Sync for MappedDeviceMemory {
}

impl fmt::Debug for MappedDeviceMemory {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
@ -339,8 +348,10 @@ impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
}
}

unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {}
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {
}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {
}

impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
type Target = T;
@ -375,8 +386,7 @@ impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {

// TODO: check result?
unsafe {
vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(),
1, &range);
vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(), 1, &range);
}
}
}
@ -406,12 +416,16 @@ mod tests {
#[cfg(target_pointer_width = "64")]
fn oom_single() {
let (device, _) = gfx_dev_and_queue!();
let mem_ty = device.physical_device().memory_types().filter(|m| !m.is_lazily_allocated())
.next().unwrap();
let mem_ty = device
.physical_device()
.memory_types()
.filter(|m| !m.is_lazily_allocated())
.next()
.unwrap();

match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
Err(OomError::OutOfDeviceMemory) => (),
_ => panic!()
_ => panic!(),
}
}

@ -419,8 +433,12 @@ mod tests {
#[ignore] // TODO: test fails for now on Mesa+Intel
fn oom_multi() {
let (device, _) = gfx_dev_and_queue!();
let mem_ty = device.physical_device().memory_types().filter(|m| !m.is_lazily_allocated())
.next().unwrap();
let mem_ty = device
.physical_device()
.memory_types()
.filter(|m| !m.is_lazily_allocated())
.next()
.unwrap();
let heap_size = mem_ty.heap().size();

let mut allocs = Vec::new();
@ -429,7 +447,7 @@ mod tests {
match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
Err(OomError::OutOfDeviceMemory) => return, // test succeeded
Ok(a) => allocs.push(a),
_ => ()
_ => (),
}
}

@ -12,12 +12,12 @@ use std::ops::Range;
use std::sync::Arc;
use std::sync::Mutex;

use OomError;
use device::Device;
use instance::Instance;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use OomError;

/// Memory pool that operates on a given memory type.
#[derive(Debug)]
@ -36,12 +36,11 @@ impl StdHostVisibleMemoryTypePool {
/// - Panics if the `device` and `memory_type` don't belong to the same physical device.
///
#[inline]
pub fn new(device: Arc<Device>, memory_type: MemoryType)
-> Arc<StdHostVisibleMemoryTypePool>
{
pub fn new(device: Arc<Device>, memory_type: MemoryType) -> Arc<StdHostVisibleMemoryTypePool> {
assert_eq!(&**device.physical_device().instance() as *const Instance,
&**memory_type.physical_device().instance() as *const Instance);
assert_eq!(device.physical_device().index(), memory_type.physical_device().index());
assert_eq!(device.physical_device().index(),
memory_type.physical_device().index());

Arc::new(StdHostVisibleMemoryTypePool {
device: device.clone(),
@ -58,12 +57,14 @@ impl StdHostVisibleMemoryTypePool {
/// - Panics if `alignment` is 0.
///
pub fn alloc(me: &Arc<Self>, size: usize, alignment: usize)
-> Result<StdHostVisibleMemoryTypePoolAlloc, OomError>
{
-> Result<StdHostVisibleMemoryTypePoolAlloc, OomError> {
assert!(size != 0);
assert!(alignment != 0);

#[inline] fn align(val: usize, al: usize) -> usize { al * (1 + (val - 1) / al) }
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}

// Find a location.
let mut occupied = me.occupied.lock().unwrap();
@ -103,7 +104,8 @@ impl StdHostVisibleMemoryTypePool {
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block = try!(DeviceMemory::alloc_and_map(me.device.clone(), me.memory_type(), to_alloc));
let new_block =
DeviceMemory::alloc_and_map(me.device.clone(), me.memory_type(), to_alloc)?;
Arc::new(new_block)
};

@ -125,7 +127,10 @@ impl StdHostVisibleMemoryTypePool {
/// Returns the memory type this pool operates on.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device.physical_device().memory_type_by_id(self.memory_type).unwrap()
self.device
.physical_device()
.memory_type_by_id(self.memory_type)
.unwrap()
}
}

@ -158,8 +163,10 @@ impl Drop for StdHostVisibleMemoryTypePoolAlloc {
fn drop(&mut self) {
let mut occupied = self.pool.occupied.lock().unwrap();

let entries = occupied.iter_mut()
.find(|e| &*e.0 as *const MappedDeviceMemory == &*self.memory).unwrap();
let entries = occupied
.iter_mut()
.find(|e| &*e.0 as *const MappedDeviceMemory == &*self.memory)
.unwrap();

entries.1.retain(|e| e.start != self.offset);
}

@ -7,17 +7,17 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use OomError;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use OomError;

pub use self::pool::StdMemoryPool;
pub use self::pool::StdMemoryPoolAlloc;
pub use self::host_visible::StdHostVisibleMemoryTypePool;
pub use self::host_visible::StdHostVisibleMemoryTypePoolAlloc;
pub use self::non_host_visible::StdNonHostVisibleMemoryTypePool;
pub use self::non_host_visible::StdNonHostVisibleMemoryTypePoolAlloc;
pub use self::pool::StdMemoryPool;
pub use self::pool::StdMemoryPoolAlloc;

mod host_visible;
mod non_host_visible;

@ -12,11 +12,11 @@ use std::ops::Range;
use std::sync::Arc;
use std::sync::Mutex;

use OomError;
use device::Device;
use instance::Instance;
use instance::MemoryType;
use memory::DeviceMemory;
use OomError;

/// Memory pool that operates on a given memory type.
#[derive(Debug)]
@ -36,11 +36,11 @@ impl StdNonHostVisibleMemoryTypePool {
///
#[inline]
pub fn new(device: Arc<Device>, memory_type: MemoryType)
-> Arc<StdNonHostVisibleMemoryTypePool>
{
-> Arc<StdNonHostVisibleMemoryTypePool> {
assert_eq!(&**device.physical_device().instance() as *const Instance,
&**memory_type.physical_device().instance() as *const Instance);
assert_eq!(device.physical_device().index(), memory_type.physical_device().index());
assert_eq!(device.physical_device().index(),
memory_type.physical_device().index());

Arc::new(StdNonHostVisibleMemoryTypePool {
device: device.clone(),
@ -57,12 +57,14 @@ impl StdNonHostVisibleMemoryTypePool {
/// - Panics if `alignment` is 0.
///
pub fn alloc(me: &Arc<Self>, size: usize, alignment: usize)
-> Result<StdNonHostVisibleMemoryTypePoolAlloc, OomError>
{
-> Result<StdNonHostVisibleMemoryTypePoolAlloc, OomError> {
assert!(size != 0);
assert!(alignment != 0);

#[inline] fn align(val: usize, al: usize) -> usize { al * (1 + (val - 1) / al) }
#[inline]
fn align(val: usize, al: usize) -> usize {
al * (1 + (val - 1) / al)
}

// Find a location.
let mut occupied = me.occupied.lock().unwrap();
@ -102,7 +104,7 @@ impl StdNonHostVisibleMemoryTypePool {
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block = try!(DeviceMemory::alloc(me.device.clone(), me.memory_type(), to_alloc));
let new_block = DeviceMemory::alloc(me.device.clone(), me.memory_type(), to_alloc)?;
Arc::new(new_block)
};

@ -124,7 +126,10 @@ impl StdNonHostVisibleMemoryTypePool {
/// Returns the memory type this pool operates on.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device.physical_device().memory_type_by_id(self.memory_type).unwrap()
self.device
.physical_device()
.memory_type_by_id(self.memory_type)
.unwrap()
}
}

@ -157,8 +162,10 @@ impl Drop for StdNonHostVisibleMemoryTypePoolAlloc {
fn drop(&mut self) {
let mut occupied = self.pool.occupied.lock().unwrap();

let entries = occupied.iter_mut()
.find(|e| &*e.0 as *const DeviceMemory == &*self.memory).unwrap();
let entries = occupied
.iter_mut()
.find(|e| &*e.0 as *const DeviceMemory == &*self.memory)
.unwrap();

entries.1.retain(|e| e.start != self.offset);
}

@ -7,15 +7,18 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use fnv::FnvHasher;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::hash::BuildHasherDefault;
use std::sync::Arc;
use std::sync::Mutex;
use fnv::FnvHasher;

use OomError;
use device::Device;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use memory::pool::AllocLayout;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
@ -23,9 +26,6 @@ use memory::pool::StdHostVisibleMemoryTypePool;
use memory::pool::StdHostVisibleMemoryTypePoolAlloc;
use memory::pool::StdNonHostVisibleMemoryTypePool;
use memory::pool::StdNonHostVisibleMemoryTypePoolAlloc;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use OomError;

#[derive(Debug)]
pub struct StdMemoryPool {
@ -52,23 +52,28 @@ impl StdMemoryPool {
unsafe impl MemoryPool for Arc<StdMemoryPool> {
type Alloc = StdMemoryPoolAlloc;

fn alloc(&self, memory_type: MemoryType, size: usize, alignment: usize,
layout: AllocLayout) -> Result<StdMemoryPoolAlloc, OomError>
{
fn alloc(&self, memory_type: MemoryType, size: usize, alignment: usize, layout: AllocLayout)
-> Result<StdMemoryPoolAlloc, OomError> {
let mut pools = self.pools.lock().unwrap();

match pools.entry((memory_type.id(), layout)) {
Entry::Occupied(entry) => {
match entry.get() {
&Pool::HostVisible(ref pool) => {
let alloc = try!(StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
&Pool::NonHostVisible(ref pool) => {
let alloc = try!(StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
}
},
@ -76,18 +81,26 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
Entry::Vacant(entry) => {
match memory_type.is_host_visible() {
true => {
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(),
memory_type);
entry.insert(Pool::HostVisible(pool.clone()));
let alloc = try!(StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
false => {
let pool = StdNonHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
let pool = StdNonHostVisibleMemoryTypePool::new(self.device.clone(),
memory_type);
entry.insert(Pool::NonHostVisible(pool.clone()));
let alloc = try!(StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment));
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc { inner: inner, pool: self.clone() })
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
},
}
},

@ -165,10 +165,18 @@ impl Into<vk::PipelineColorBlendAttachmentState> for AttachmentBlend {
alphaBlendOp: self.alpha_op as u32,
colorWriteMask: {
let mut mask = 0;
if self.mask_red { mask |= vk::COLOR_COMPONENT_R_BIT; }
if self.mask_green { mask |= vk::COLOR_COMPONENT_G_BIT; }
if self.mask_blue { mask |= vk::COLOR_COMPONENT_B_BIT; }
if self.mask_alpha { mask |= vk::COLOR_COMPONENT_A_BIT; }
if self.mask_red {
mask |= vk::COLOR_COMPONENT_R_BIT;
}
if self.mask_green {
mask |= vk::COLOR_COMPONENT_G_BIT;
}
if self.mask_blue {
mask |= vk::COLOR_COMPONENT_B_BIT;
}
if self.mask_alpha {
mask |= vk::COLOR_COMPONENT_A_BIT;
}
mask
},
}

@ -22,6 +22,7 @@
//! of [`get_data`](struct.PipelineCache.html#method.get_data) for example of how to store the data
//! on the disk, and [`with_data`](struct.PipelineCache.html#method.with_data) for how to reload it.
//!

use std::mem;
use std::ptr;
use std::sync::Arc;
@ -82,8 +83,7 @@ impl PipelineCache {
/// ```
#[inline]
pub unsafe fn with_data(device: Arc<Device>, initial_data: &[u8])
-> Result<Arc<PipelineCache>, OomError>
{
-> Result<Arc<PipelineCache>, OomError> {
PipelineCache::new_impl(device, Some(initial_data))
}

@ -105,8 +105,7 @@ impl PipelineCache {

// Actual implementation of the constructor.
unsafe fn new_impl(device: Arc<Device>, initial_data: Option<&[u8]>)
-> Result<Arc<PipelineCache>, OomError>
{
-> Result<Arc<PipelineCache>, OomError> {
let vk = device.pointers();

let cache = {
@ -115,12 +114,16 @@ impl PipelineCache {
pNext: ptr::null(),
flags: 0, // reserved
initialDataSize: initial_data.map(|d| d.len()).unwrap_or(0),
pInitialData: initial_data.map(|d| d.as_ptr() as *const _).unwrap_or(ptr::null()),
pInitialData: initial_data
.map(|d| d.as_ptr() as *const _)
.unwrap_or(ptr::null()),
};

let mut output = mem::uninitialized();
try!(check_errors(vk.CreatePipelineCache(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreatePipelineCache(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -146,13 +149,18 @@ impl PipelineCache {
unsafe {
let vk = self.device.pointers();

let pipelines = pipelines.into_iter().map(|pipeline| {
let pipelines = pipelines
.into_iter()
.map(|pipeline| {
assert!(&***pipeline as *const _ != &*self as *const _);
pipeline.cache
}).collect::<Vec<_>>();
})
.collect::<Vec<_>>();

try!(check_errors(vk.MergePipelineCaches(self.device.internal_object(), self.cache,
pipelines.len() as u32, pipelines.as_ptr())));
check_errors(vk.MergePipelineCaches(self.device.internal_object(),
self.cache,
pipelines.len() as u32,
pipelines.as_ptr()))?;

Ok(())
}
@ -191,12 +199,16 @@ impl PipelineCache {
let vk = self.device.pointers();

let mut num = 0;
try!(check_errors(vk.GetPipelineCacheData(self.device.internal_object(), self.cache,
&mut num, ptr::null_mut())));
check_errors(vk.GetPipelineCacheData(self.device.internal_object(),
self.cache,
&mut num,
ptr::null_mut()))?;

let mut data: Vec<u8> = Vec::with_capacity(num as usize);
try!(check_errors(vk.GetPipelineCacheData(self.device.internal_object(), self.cache,
&mut num, data.as_mut_ptr() as *mut _)));
check_errors(vk.GetPipelineCacheData(self.device.internal_object(),
self.cache,
&mut num,
data.as_mut_ptr() as *mut _))?;
data.set_len(num as usize);

Ok(data)

@ -17,24 +17,24 @@ use std::sync::Arc;
use descriptor::descriptor::DescriptorDesc;
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
use descriptor::pipeline_layout::PipelineLayout;
use descriptor::pipeline_layout::PipelineLayoutSys;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutCreationError;
use descriptor::pipeline_layout::PipelineLayoutDesc;
use descriptor::pipeline_layout::PipelineLayoutDescNames;
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
use descriptor::pipeline_layout::PipelineLayoutSuperset;
use descriptor::pipeline_layout::PipelineLayoutAbstract;
use descriptor::pipeline_layout::PipelineLayoutNotSupersetError;
use descriptor::pipeline_layout::PipelineLayoutSuperset;
use descriptor::pipeline_layout::PipelineLayoutSys;
use pipeline::shader::ComputeShaderEntryPoint;
use pipeline::shader::SpecializationConstants;

use device::Device;
use device::DeviceOwned;
use Error;
use OomError;
use SafeDeref;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use vk;

/// A pipeline object that describes to the Vulkan implementation how it should perform compute
@ -56,15 +56,17 @@ struct Inner {

impl ComputePipeline<()> {
/// Builds a new `ComputePipeline`.
pub fn new<Css, Csl>(device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>,
specialization: &Css)
pub fn new<Css, Csl>(
device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>, specialization: &Css)
-> Result<ComputePipeline<PipelineLayout<Csl>>, ComputePipelineCreationError>
where Csl: PipelineLayoutDescNames + Clone,
Css: SpecializationConstants
{
unsafe {
let pipeline_layout = shader.layout().clone().build(device.clone())?;
ComputePipeline::with_unchecked_pipeline_layout(device, shader, specialization,
ComputePipeline::with_unchecked_pipeline_layout(device,
shader,
specialization,
pipeline_layout)
}
}
@ -75,32 +77,32 @@ impl<Pl> ComputePipeline<Pl> {
///
/// An error will be returned if the pipeline layout isn't a superset of what the shader
/// uses.
pub fn with_pipeline_layout<Css, Csl>(device: Arc<Device>,
shader: &ComputeShaderEntryPoint<Css, Csl>,
specialization: &Css,
pub fn with_pipeline_layout<Css, Csl>(
device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>, specialization: &Css,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
where Csl: PipelineLayoutDescNames + Clone,
Css: SpecializationConstants,
Pl: PipelineLayoutAbstract,
Pl: PipelineLayoutAbstract
{
unsafe {
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout, shader.layout())?;
ComputePipeline::with_unchecked_pipeline_layout(device, shader, specialization,
ComputePipeline::with_unchecked_pipeline_layout(device,
shader,
specialization,
pipeline_layout)
}
}

/// Same as `with_pipeline_layout`, but doesn't check whether the pipeline layout is a
/// superset of what the shader expects.
pub unsafe fn with_unchecked_pipeline_layout<Css, Csl>(device: Arc<Device>,
shader: &ComputeShaderEntryPoint<Css, Csl>,
specialization: &Css,
pub unsafe fn with_unchecked_pipeline_layout<Css, Csl>(
device: Arc<Device>, shader: &ComputeShaderEntryPoint<Css, Csl>, specialization: &Css,
pipeline_layout: Pl)
-> Result<ComputePipeline<Pl>, ComputePipelineCreationError>
where Csl: PipelineLayoutDescNames + Clone,
Css: SpecializationConstants,
Pl: PipelineLayoutAbstract,
Pl: PipelineLayoutAbstract
{
let vk = device.pointers();

@ -138,8 +140,12 @@ impl<Pl> ComputePipeline<Pl> {
};

let mut output = mem::uninitialized();
try!(check_errors(vk.CreateComputePipelines(device.internal_object(), 0,
1, &infos, ptr::null(), &mut output)));
check_errors(vk.CreateComputePipelines(device.internal_object(),
0,
1,
&infos,
ptr::null(),
&mut output))?;
output
};

@ -190,7 +196,8 @@ unsafe impl<Pl> ComputePipelineAbstract for ComputePipeline<Pl>
}

unsafe impl<T> ComputePipelineAbstract for T
where T: SafeDeref, T::Target: ComputePipelineAbstract
where T: SafeDeref,
T::Target: ComputePipelineAbstract
{
#[inline]
fn inner(&self) -> ComputePipelineSys {
@ -212,7 +219,9 @@ unsafe impl<'a> VulkanObject for ComputePipelineSys<'a> {
}
}

unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl> where Pl: PipelineLayoutAbstract {
unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl>
where Pl: PipelineLayoutAbstract
{
#[inline]
fn sys(&self) -> PipelineLayoutSys {
self.layout().sys()
@ -224,7 +233,9 @@ unsafe impl<Pl> PipelineLayoutAbstract for ComputePipeline<Pl> where Pl: Pipelin
}
}

unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl> where Pl: PipelineLayoutDesc {
unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl>
where Pl: PipelineLayoutDesc
{
#[inline]
fn num_sets(&self) -> usize {
self.pipeline_layout.num_sets()
@ -251,7 +262,9 @@ unsafe impl<Pl> PipelineLayoutDesc for ComputePipeline<Pl> where Pl: PipelineLay
}
}

unsafe impl<Pl> PipelineLayoutDescNames for ComputePipeline<Pl> where Pl: PipelineLayoutDescNames {
unsafe impl<Pl> PipelineLayoutDescNames for ComputePipeline<Pl>
where Pl: PipelineLayoutDescNames
{
#[inline]
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
self.pipeline_layout.descriptor_by_name(name)
@ -301,12 +314,10 @@ impl error::Error for ComputePipelineCreationError {
fn description(&self) -> &str {
match *self {
ComputePipelineCreationError::OomError(_) => "not enough memory available",
ComputePipelineCreationError::PipelineLayoutCreationError(_) => "error while creating \
the pipeline layout \
object",
ComputePipelineCreationError::IncompatiblePipelineLayout(_) => "the pipeline layout is \
not compatible with what \
the shader expects",
ComputePipelineCreationError::PipelineLayoutCreationError(_) =>
"error while creating the pipeline layout object",
ComputePipelineCreationError::IncompatiblePipelineLayout(_) =>
"the pipeline layout is not compatible with what the shader expects",
}
}

@ -358,7 +369,7 @@ impl From<Error> for ComputePipelineCreationError {
err @ Error::OutOfDeviceMemory => {
ComputePipelineCreationError::OomError(OomError::from(err))
},
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

@ -11,7 +11,6 @@
|
||||
// to avoid duplicating code, so we hide the warnings for now
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::sync::Arc;
|
||||
use descriptor::pipeline_layout::EmptyPipelineDesc;
|
||||
use descriptor::pipeline_layout::PipelineLayoutAbstract;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescNames;
|
||||
@ -19,9 +18,9 @@ use device::Device;
|
||||
use framebuffer::RenderPassAbstract;
|
||||
use framebuffer::RenderPassSubpassInterface;
|
||||
use framebuffer::Subpass;
|
||||
use pipeline::blend::Blend;
|
||||
use pipeline::blend::AttachmentsBlend;
|
||||
use pipeline::blend::AttachmentBlend;
|
||||
use pipeline::blend::AttachmentsBlend;
|
||||
use pipeline::blend::Blend;
|
||||
use pipeline::blend::LogicOp;
|
||||
use pipeline::depth_stencil::DepthStencil;
|
||||
use pipeline::graphics_pipeline::GraphicsPipeline;
|
||||
@ -36,23 +35,46 @@ use pipeline::raster::FrontFace;
|
||||
use pipeline::raster::PolygonMode;
|
||||
use pipeline::raster::Rasterization;
|
||||
use pipeline::shader::EmptyShaderInterfaceDef;
|
||||
use pipeline::shader::FragmentShaderEntryPoint;
|
||||
use pipeline::shader::GeometryShaderEntryPoint;
|
||||
use pipeline::shader::ShaderInterfaceDef;
|
||||
use pipeline::shader::ShaderInterfaceDefMatch;
|
||||
use pipeline::shader::VertexShaderEntryPoint;
|
||||
use pipeline::shader::TessControlShaderEntryPoint;
|
||||
use pipeline::shader::TessEvaluationShaderEntryPoint;
|
||||
use pipeline::shader::GeometryShaderEntryPoint;
|
||||
use pipeline::shader::FragmentShaderEntryPoint;
|
||||
use pipeline::shader::VertexShaderEntryPoint;
|
||||
use pipeline::vertex::SingleBufferDefinition;
|
||||
use pipeline::vertex::VertexDefinition;
|
||||
use pipeline::viewport::Scissor;
|
||||
use pipeline::viewport::Viewport;
|
||||
use pipeline::viewport::ViewportsState;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Prototype for a `GraphicsPipeline`.
|
||||
// TODO: we can optimize this by filling directly the raw vk structs
|
||||
pub struct GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
|
||||
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp> {
|
||||
pub struct GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>
|
||||
{
|
||||
vertex_input: Vdef,
|
||||
vertex_shader: Option<VertexShaderEntryPoint<'a, Vsp, Vi, Vo, Vl>>,
|
||||
input_assembly: InputAssembly,
|
||||
@ -67,14 +89,30 @@ pub struct GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl
|
||||
render_pass: Option<Subpass<Rp>>,
|
||||
}
|
||||
|
||||
impl<'a> GraphicsPipelineBuilder<'a, SingleBufferDefinition<()>, (), (), (), (), (),
|
||||
EmptyShaderInterfaceDef, EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc, (), EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef, EmptyPipelineDesc, (),
|
||||
EmptyShaderInterfaceDef, EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc, (), EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef, EmptyPipelineDesc, ()>
|
||||
{
|
||||
impl<'a>
|
||||
GraphicsPipelineBuilder<'a,
|
||||
SingleBufferDefinition<()>,
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
()> {
|
||||
/// Builds a new empty builder.
|
||||
pub(super) fn new() -> Self {
|
||||
GraphicsPipelineBuilder {
|
||||
@ -94,10 +132,52 @@ impl<'a> GraphicsPipelineBuilder<'a, SingleBufferDefinition<()>, (), (), (), (),
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi,
|
||||
Fo, Fl, Rp>
|
||||
GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
|
||||
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
|
||||
impl<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>
|
||||
GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>
|
||||
where Vdef: VertexDefinition<Vi>,
|
||||
Vl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
|
||||
Fl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
|
||||
@ -111,46 +191,128 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
Tco: ShaderInterfaceDef,
|
||||
Teo: ShaderInterfaceDef,
|
||||
Go: ShaderInterfaceDef,
|
||||
Fi: ShaderInterfaceDefMatch<Go> + ShaderInterfaceDefMatch<Teo> + ShaderInterfaceDefMatch<Vo>,
|
||||
Fi: ShaderInterfaceDefMatch<Go>
|
||||
+ ShaderInterfaceDefMatch<Teo>
|
||||
+ ShaderInterfaceDefMatch<Vo>,
|
||||
Fo: ShaderInterfaceDef,
|
||||
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>,
|
||||
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>
|
||||
{
|
||||
/// Builds the graphics pipeline.
|
||||
// TODO: replace Box<PipelineLayoutAbstract> with a PipelineUnion struct without template params
|
||||
pub fn build(self, device: Arc<Device>) -> Result<GraphicsPipeline<Vdef, Box<PipelineLayoutAbstract + Send + Sync>, Rp>, GraphicsPipelineCreationError> {
|
||||
pub fn build(self, device: Arc<Device>)
|
||||
-> Result<GraphicsPipeline<Vdef, Box<PipelineLayoutAbstract + Send + Sync>, Rp>,
|
||||
GraphicsPipelineCreationError> {
|
||||
// TODO: return errors instead of panicking if missing param
|
||||
GraphicsPipeline::with_tessellation_and_geometry(device, GraphicsPipelineParams {
|
||||
GraphicsPipeline::with_tessellation_and_geometry(device,
|
||||
GraphicsPipelineParams {
|
||||
vertex_input: self.vertex_input,
|
||||
vertex_shader: self.vertex_shader.expect("Vertex shader not specified in the builder"),
|
||||
vertex_shader:
|
||||
self.vertex_shader
|
||||
.expect("Vertex shader not \
|
||||
specified in the \
|
||||
builder"),
|
||||
input_assembly: self.input_assembly,
|
||||
tessellation: self.tessellation,
|
||||
geometry_shader: self.geometry_shader,
|
||||
viewport: self.viewport.expect("Viewport state not specified in the builder"),
|
||||
viewport:
|
||||
self.viewport
|
||||
.expect("Viewport state not \
|
||||
specified in the \
|
||||
builder"),
|
||||
raster: self.raster,
|
||||
multisample: self.multisample,
|
||||
fragment_shader: self.fragment_shader.expect("Fragment shader not specified in the builder"),
|
||||
fragment_shader:
|
||||
self.fragment_shader
|
||||
.expect("Fragment shader not \
|
||||
specified in the \
|
||||
builder"),
|
||||
depth_stencil: self.depth_stencil,
|
||||
blend: self.blend,
|
||||
render_pass: self.render_pass.expect("Render pass not specified in the builder"),
|
||||
render_pass:
|
||||
self.render_pass
|
||||
.expect("Render pass not \
|
||||
specified in the \
|
||||
builder"),
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: add build_with_cache method
|
||||
}
|
||||
|
||||
impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi,
|
||||
Fo, Fl, Rp>
|
||||
GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
|
||||
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
|
||||
{
|
||||
impl<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>
|
||||
GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp> {
|
||||
// TODO: add pipeline derivate system
|
||||
|
||||
/// Sets the vertex input.
|
||||
#[inline]
|
||||
pub fn vertex_input<T>(self, vertex_input: T)
|
||||
-> GraphicsPipelineBuilder<'a, T, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
|
||||
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
|
||||
{
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
T,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp> {
|
||||
GraphicsPipelineBuilder {
|
||||
vertex_input: vertex_input,
|
||||
vertex_shader: self.vertex_shader,
|
||||
@ -173,20 +335,61 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
/// vertex.
|
||||
#[inline]
|
||||
pub fn vertex_input_single_buffer<V>(self)
|
||||
-> GraphicsPipelineBuilder<'a, SingleBufferDefinition<V>, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
|
||||
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
|
||||
{
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
SingleBufferDefinition<V>,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp> {
|
||||
self.vertex_input(SingleBufferDefinition::<V>::new())
|
||||
}
|
||||
|
||||
/// Sets the vertex shader to use.
|
||||
// TODO: correct specialization constants
|
||||
#[inline]
|
||||
pub fn vertex_shader<Vi2, Vo2, Vl2>(self, shader: VertexShaderEntryPoint<'a, (), Vi2, Vo2, Vl2>,
|
||||
pub fn vertex_shader<Vi2, Vo2, Vl2>(self,
|
||||
shader: VertexShaderEntryPoint<'a, (), Vi2, Vo2, Vl2>,
|
||||
specialization_constants: ())
|
||||
-> GraphicsPipelineBuilder<'a, Vdef, (), Vi2, Vo2, Vl2, Tcs, Tci, Tco,
|
||||
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
|
||||
{
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
(),
|
||||
Vi2,
|
||||
Vo2,
|
||||
Vl2,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp> {
|
||||
GraphicsPipelineBuilder {
|
||||
vertex_input: self.vertex_input,
|
||||
vertex_shader: Some(shader),
|
||||
@ -357,11 +560,32 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
/// Sets the geometry shader to use.
|
||||
// TODO: correct specialization constants
|
||||
#[inline]
|
||||
pub fn geometry_shader<Gi2, Go2, Gl2>(self, shader: GeometryShaderEntryPoint<'a, (), Gi2, Go2, Gl2>,
|
||||
pub fn geometry_shader<Gi2, Go2, Gl2>(self,
|
||||
shader: GeometryShaderEntryPoint<'a, (), Gi2, Go2, Gl2>,
|
||||
specialization_constants: ())
|
||||
-> GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
|
||||
Tcl, Tes, Tei, Teo, Tel, (), Gi2, Go2, Gl2, Fs, Fi, Fo, Fl, Rp>
|
||||
{
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
(),
|
||||
Gi2,
|
||||
Go2,
|
||||
Gl2,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp> {
|
||||
GraphicsPipelineBuilder {
|
||||
vertex_input: self.vertex_input,
|
||||
vertex_shader: self.vertex_shader,
|
||||
@ -409,9 +633,8 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
pub fn viewports_dynamic_scissors_fixed<I>(mut self, scissors: I) -> Self
|
||||
where I: IntoIterator<Item = Scissor>
|
||||
{
|
||||
self.viewport = Some(ViewportsState::DynamicViewports {
|
||||
scissors: scissors.into_iter().collect()
|
||||
});
|
||||
self.viewport =
|
||||
Some(ViewportsState::DynamicViewports { scissors: scissors.into_iter().collect() });
|
||||
self
|
||||
}
|
||||
|
||||
@ -420,7 +643,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
#[inline]
|
||||
pub fn viewports_dynamic_scissors_irrelevant(mut self, num: u32) -> Self {
|
||||
self.viewport = Some(ViewportsState::DynamicViewports {
|
||||
scissors: (0 .. num).map(|_| Scissor::irrelevant()).collect()
|
||||
scissors: (0 .. num).map(|_| Scissor::irrelevant()).collect(),
|
||||
});
|
||||
self
|
||||
}
|
||||
@ -431,9 +654,8 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
pub fn viewports_fixed_scissors_dynamic<I>(mut self, viewports: I) -> Self
|
||||
where I: IntoIterator<Item = Viewport>
|
||||
{
|
||||
self.viewport = Some(ViewportsState::DynamicScissors {
|
||||
viewports: viewports.into_iter().collect()
|
||||
});
|
||||
self.viewport =
|
||||
Some(ViewportsState::DynamicScissors { viewports: viewports.into_iter().collect() });
|
||||
self
|
||||
}
|
||||
|
||||
@ -441,9 +663,7 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
/// drawing.
|
||||
#[inline]
|
||||
pub fn viewports_scissors_dynamic(mut self, num: u32) -> Self {
|
||||
self.viewport = Some(ViewportsState::Dynamic {
|
||||
num: num
|
||||
});
|
||||
self.viewport = Some(ViewportsState::Dynamic { num: num });
|
||||
self
|
||||
}
|
||||
|
||||
@ -562,11 +782,32 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
/// The fragment shader is run once for each pixel that is covered by each primitive.
|
||||
// TODO: correct specialization constants
|
||||
#[inline]
|
||||
pub fn fragment_shader<Fi2, Fo2, Fl2>(self, shader: FragmentShaderEntryPoint<'a, (), Fi2, Fo2, Fl2>,
|
||||
pub fn fragment_shader<Fi2, Fo2, Fl2>(self,
|
||||
shader: FragmentShaderEntryPoint<'a, (), Fi2, Fo2, Fl2>,
|
||||
specialization_constants: ())
|
||||
-> GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
|
||||
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, (), Fi2, Fo2, Fl2, Rp>
|
||||
{
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
(),
|
||||
Fi2,
|
||||
Fo2,
|
||||
Fl2,
|
||||
Rp> {
|
||||
GraphicsPipelineBuilder {
|
||||
vertex_input: self.vertex_input,
|
||||
vertex_shader: self.vertex_shader,
|
||||
@ -680,9 +921,29 @@ impl<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gs, Gi,
|
||||
/// Sets the render pass subpass to use.
|
||||
#[inline]
|
||||
pub fn render_pass<Rp2>(self, subpass: Subpass<Rp2>)
|
||||
-> GraphicsPipelineBuilder<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco,
|
||||
Tcl, Tes, Tei, Teo, Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp2>
|
||||
{
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp2> {
|
||||
GraphicsPipelineBuilder {
|
||||
vertex_input: self.vertex_input,
|
||||
vertex_shader: self.vertex_shader,
|
||||
|
@ -11,6 +11,7 @@
|
||||
// to avoid duplicating code, so we hide the warnings for now
|
||||
#![allow(deprecated)]
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
@ -18,45 +19,44 @@ use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use std::u32;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use Error;
|
||||
use OomError;
|
||||
use SafeDeref;
|
||||
use VulkanObject;
|
||||
use buffer::BufferAccess;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use check_errors;
|
||||
use descriptor::PipelineLayoutAbstract;
|
||||
use descriptor::descriptor::DescriptorDesc;
|
||||
use descriptor::descriptor_set::UnsafeDescriptorSetLayout;
|
||||
use descriptor::pipeline_layout::EmptyPipelineDesc;
|
||||
use descriptor::pipeline_layout::PipelineLayout;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDesc;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescNames;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescPcRange;
|
||||
use descriptor::pipeline_layout::PipelineLayoutSuperset;
|
||||
use descriptor::pipeline_layout::PipelineLayoutDescUnion;
|
||||
use descriptor::pipeline_layout::PipelineLayoutNotSupersetError;
|
||||
use descriptor::pipeline_layout::PipelineLayoutSuperset;
|
||||
use descriptor::pipeline_layout::PipelineLayoutSys;
|
||||
use descriptor::pipeline_layout::EmptyPipelineDesc;
|
||||
use device::Device;
|
||||
use device::DeviceOwned;
|
||||
use format::ClearValue;
|
||||
use framebuffer::LayoutAttachmentDescription;
|
||||
use framebuffer::LayoutPassDescription;
|
||||
use framebuffer::LayoutPassDependencyDescription;
|
||||
use framebuffer::LayoutPassDescription;
|
||||
use framebuffer::RenderPassAbstract;
|
||||
use framebuffer::RenderPassDesc;
|
||||
use framebuffer::RenderPassDescClearValues;
|
||||
use framebuffer::RenderPassSubpassInterface;
|
||||
use framebuffer::RenderPassSys;
|
||||
use framebuffer::Subpass;
|
||||
use Error;
|
||||
use OomError;
|
||||
use SafeDeref;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
use pipeline::blend::Blend;
|
||||
use pipeline::blend::AttachmentsBlend;
|
||||
use pipeline::blend::Blend;
|
||||
use pipeline::depth_stencil::Compare;
|
||||
use pipeline::depth_stencil::DepthStencil;
|
||||
use pipeline::depth_stencil::DepthBounds;
|
||||
use pipeline::depth_stencil::DepthStencil;
|
||||
use pipeline::input_assembly::InputAssembly;
|
||||
use pipeline::input_assembly::PrimitiveTopology;
|
||||
use pipeline::multisample::Multisample;
|
||||
@ -64,18 +64,18 @@ use pipeline::raster::DepthBiasControl;
|
||||
use pipeline::raster::PolygonMode;
|
||||
use pipeline::raster::Rasterization;
|
||||
use pipeline::shader::EmptyShaderInterfaceDef;
|
||||
use pipeline::shader::FragmentShaderEntryPoint;
|
||||
use pipeline::shader::GeometryShaderEntryPoint;
|
||||
use pipeline::shader::ShaderInterfaceDef;
|
||||
use pipeline::shader::ShaderInterfaceDefMatch;
|
||||
use pipeline::shader::ShaderInterfaceMismatchError;
|
||||
use pipeline::shader::VertexShaderEntryPoint;
|
||||
use pipeline::shader::TessControlShaderEntryPoint;
|
||||
use pipeline::shader::TessEvaluationShaderEntryPoint;
|
||||
use pipeline::shader::GeometryShaderEntryPoint;
|
||||
use pipeline::shader::FragmentShaderEntryPoint;
|
||||
use pipeline::shader::VertexShaderEntryPoint;
|
||||
use pipeline::vertex::IncompatibleVertexDefinitionError;
|
||||
use pipeline::vertex::SingleBufferDefinition;
|
||||
use pipeline::vertex::VertexDefinition;
|
||||
use pipeline::vertex::VertexSource;
|
||||
use pipeline::vertex::IncompatibleVertexDefinitionError;
|
||||
use pipeline::viewport::ViewportsState;
|
||||
|
||||
pub use self::builder::GraphicsPipelineBuilder;
|
||||
@ -86,8 +86,29 @@ mod builder;
|
||||
|
||||
/// Description of a `GraphicsPipeline`.
|
||||
#[deprecated = "Use the GraphicsPipelineBuilder instead"]
|
||||
pub struct GraphicsPipelineParams<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo,
|
||||
Tel, Gs, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>
|
||||
pub struct GraphicsPipelineParams<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gs,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>
|
||||
{
|
||||
/// Describes the layout of the vertex input.
|
||||
///
|
||||
@ -106,7 +127,8 @@ pub struct GraphicsPipelineParams<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl,
|
||||
|
||||
/// Parameters of the tessellation stage. `None` if you don't want to use tessellation.
|
||||
/// If you use tessellation, you must enable the `tessellation_shader` feature on the device.
|
||||
pub tessellation: Option<GraphicsPipelineParamsTess<'a, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel>>,
|
||||
pub tessellation:
|
||||
Option<GraphicsPipelineParamsTess<'a, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel>>,
|
||||
|
||||
/// The entry point of the geometry shader. `None` if you don't want a geometry shader.
|
||||
/// If you use a geometry shader, you must enable the `geometry_shader` feature on the device.
|
||||
@ -181,14 +203,30 @@ struct Inner {
|
||||
impl GraphicsPipeline<(), (), ()> {
|
||||
/// Starts the building process of a graphics pipeline. Returns a builder object that you can
|
||||
/// fill with the various parameters.
|
||||
pub fn start<'a>() -> GraphicsPipelineBuilder<'a, SingleBufferDefinition<()>, (), (), (), (),
|
||||
(), EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef, EmptyPipelineDesc, (),
|
||||
EmptyShaderInterfaceDef, EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc, (), EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef, EmptyPipelineDesc, (),
|
||||
EmptyShaderInterfaceDef, EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc, ()>
|
||||
pub fn start<'a>()
|
||||
-> GraphicsPipelineBuilder<'a,
|
||||
SingleBufferDefinition<()>,
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyShaderInterfaceDef,
|
||||
EmptyPipelineDesc,
|
||||
()>
|
||||
{
|
||||
GraphicsPipelineBuilder::new()
|
||||
}
|
||||
@ -220,17 +258,42 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
Vo: ShaderInterfaceDef,
|
||||
Rp: RenderPassSubpassInterface<Fo>,
|
||||
{
|
||||
if let Err(err) = params.fragment_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexFragmentStagesMismatch(err));
|
||||
}
|
||||
|
||||
let pl = params.vertex_shader.layout().clone()
|
||||
let pl = params
|
||||
.vertex_shader
|
||||
.layout()
|
||||
.clone()
|
||||
.union(params.fragment_shader.layout().clone())
|
||||
.build(device.clone()).unwrap(); // TODO: error
|
||||
.build(device.clone())
|
||||
.unwrap(); // TODO: error
|
||||
|
||||
GraphicsPipeline::new_inner::<_, _, _, _, (), (), (), EmptyPipelineDesc, (), (), (),
|
||||
EmptyPipelineDesc, (), (), (), EmptyPipelineDesc, _, _, _, _>
|
||||
(device, params, pl)
|
||||
GraphicsPipeline::new_inner::<_,
|
||||
_,
|
||||
_,
|
||||
_,
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
EmptyPipelineDesc,
|
||||
(),
|
||||
(),
|
||||
(),
|
||||
EmptyPipelineDesc,
|
||||
_,
|
||||
_,
|
||||
_,
|
||||
_>(device, params, pl)
|
||||
}
|
||||
|
||||
/// Builds a new graphics pipeline object with a geometry shader.
|
||||
@ -260,15 +323,26 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
Rp: RenderPassSubpassInterface<Fo>,
|
||||
{
|
||||
if let Some(ref geometry_shader) = params.geometry_shader {
|
||||
if let Err(err) = geometry_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = geometry_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexGeometryStagesMismatch(err));
|
||||
};
|
||||
|
||||
if let Err(err) = params.fragment_shader.input().matches(geometry_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(geometry_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::GeometryFragmentStagesMismatch(err));
|
||||
}
|
||||
} else {
|
||||
if let Err(err) = params.fragment_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexFragmentStagesMismatch(err));
|
||||
}
|
||||
}
|
||||
@ -313,18 +387,32 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>,
|
||||
{
|
||||
if let Some(ref tess) = params.tessellation {
|
||||
if let Err(err) = tess.tessellation_control_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = tess.tessellation_control_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexTessControlStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = tess.tessellation_evaluation_shader.input().matches(tess.tessellation_control_shader.output()) {
|
||||
if let Err(err) = tess.tessellation_evaluation_shader
|
||||
.input()
|
||||
.matches(tess.tessellation_control_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::TessControlTessEvalStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = params.fragment_shader.input().matches(tess.tessellation_evaluation_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(tess.tessellation_evaluation_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::TessEvalFragmentStagesMismatch(err));
|
||||
}
|
||||
|
||||
} else {
|
||||
if let Err(err) = params.fragment_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexFragmentStagesMismatch(err));
|
||||
}
|
||||
}
|
||||
@ -349,13 +437,53 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
// TODO: replace Box<PipelineLayoutAbstract> with a PipelineUnion struct without template params
|
||||
#[inline]
|
||||
#[deprecated = "Use the GraphicsPipelineBuilder instead"]
|
||||
pub fn with_tessellation_and_geometry<'a, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gsp, Gi,
|
||||
Go, Gl, Fs, Fi, Fo, Fl>
|
||||
(device: Arc<Device>,
|
||||
params: GraphicsPipelineParams<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes,
|
||||
Tei, Teo, Tel, Gsp, Gi, Go, Gl, Fs, Fi,
|
||||
Fo, Fl, Rp>)
|
||||
-> Result<GraphicsPipeline<Vdef, Box<PipelineLayoutAbstract + Send + Sync>, Rp>, GraphicsPipelineCreationError>
|
||||
pub fn with_tessellation_and_geometry<'a,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gsp,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl>(
|
||||
device: Arc<Device>,
|
||||
params: GraphicsPipelineParams<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gsp,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>)
|
||||
-> Result<GraphicsPipeline<Vdef, Box<PipelineLayoutAbstract + Send + Sync>, Rp>,
|
||||
GraphicsPipelineCreationError>
|
||||
where Vdef: VertexDefinition<Vi>,
|
||||
Vl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
|
||||
Fl: PipelineLayoutDescNames + Clone + 'static + Send + Sync, // TODO: Clone + 'static + Send + Sync shouldn't be required
|
||||
@ -369,21 +497,31 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
Tco: ShaderInterfaceDef,
|
||||
Teo: ShaderInterfaceDef,
|
||||
Go: ShaderInterfaceDef,
|
||||
Fi: ShaderInterfaceDefMatch<Go> + ShaderInterfaceDefMatch<Teo> + ShaderInterfaceDefMatch<Vo>,
|
||||
Fi: ShaderInterfaceDefMatch<Go>
|
||||
+ ShaderInterfaceDefMatch<Teo>
|
||||
+ ShaderInterfaceDefMatch<Vo>,
|
||||
Fo: ShaderInterfaceDef,
|
||||
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>,
|
||||
Rp: RenderPassAbstract + RenderPassSubpassInterface<Fo>
|
||||
{
|
||||
let pl;
|
||||
|
||||
if let Some(ref tess) = params.tessellation {
|
||||
if let Some(ref gs) = params.geometry_shader {
|
||||
if let Err(err) = tess.tessellation_control_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = tess.tessellation_control_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexTessControlStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = tess.tessellation_evaluation_shader.input().matches(tess.tessellation_control_shader.output()) {
|
||||
if let Err(err) = tess.tessellation_evaluation_shader
|
||||
.input()
|
||||
.matches(tess.tessellation_control_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::TessControlTessEvalStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = gs.input().matches(tess.tessellation_evaluation_shader.output()) {
|
||||
if let Err(err) = gs.input()
|
||||
.matches(tess.tessellation_evaluation_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::TessEvalGeometryStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = params.fragment_shader.input().matches(gs.output()) {
|
||||
@ -398,13 +536,23 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
.build(device.clone()).unwrap()) as Box<_>; // TODO: error
|
||||
|
||||
} else {
|
||||
if let Err(err) = tess.tessellation_control_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = tess.tessellation_control_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexTessControlStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = tess.tessellation_evaluation_shader.input().matches(tess.tessellation_control_shader.output()) {
|
||||
if let Err(err) = tess.tessellation_evaluation_shader
|
||||
.input()
|
||||
.matches(tess.tessellation_control_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::TessControlTessEvalStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = params.fragment_shader.input().matches(tess.tessellation_evaluation_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(tess.tessellation_evaluation_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::TessEvalFragmentStagesMismatch(err));
|
||||
}
|
||||
|
||||
@ -417,10 +565,17 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
|
||||
} else {
|
||||
if let Some(ref geometry_shader) = params.geometry_shader {
|
||||
if let Err(err) = geometry_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = geometry_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexGeometryStagesMismatch(err));
|
||||
}
|
||||
if let Err(err) = params.fragment_shader.input().matches(geometry_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(geometry_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::GeometryFragmentStagesMismatch(err));
|
||||
}
|
||||
|
||||
@ -430,13 +585,21 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
.build(device.clone()).unwrap()) as Box<_>; // TODO: error
|
||||
|
||||
} else {
|
||||
if let Err(err) = params.fragment_shader.input().matches(params.vertex_shader.output()) {
|
||||
if let Err(err) = params
|
||||
.fragment_shader
|
||||
.input()
|
||||
.matches(params.vertex_shader.output())
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::VertexFragmentStagesMismatch(err));
|
||||
}
|
||||
|
||||
pl = Box::new(params.vertex_shader.layout().clone()
|
||||
pl = Box::new(params
|
||||
.vertex_shader
|
||||
.layout()
|
||||
.clone()
|
||||
.union(params.fragment_shader.layout().clone())
|
||||
.build(device.clone()).unwrap()) as Box<_>; // TODO: error
|
||||
.build(device.clone())
|
||||
.unwrap()) as Box<_>; // TODO: error
|
||||
}
|
||||
}
|
||||
|
||||
@ -447,11 +610,51 @@ impl<Vdef, Rp> GraphicsPipeline<Vdef, (), Rp>
|
||||
impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
where L: PipelineLayoutAbstract
|
||||
{
|
||||
fn new_inner<'a, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes, Tei, Teo, Tel, Gsp, Gi, Go, Gl, Fs,
|
||||
Fi, Fo, Fl>
|
||||
(device: Arc<Device>,
|
||||
params: GraphicsPipelineParams<'a, Vdef, Vsp, Vi, Vo, Vl, Tcs, Tci, Tco, Tcl, Tes,
|
||||
Tei, Teo, Tel, Gsp, Gi, Go, Gl, Fs, Fi, Fo, Fl, Rp>,
|
||||
fn new_inner<'a,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gsp,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl>(
|
||||
device: Arc<Device>,
|
||||
params: GraphicsPipelineParams<'a,
|
||||
Vdef,
|
||||
Vsp,
|
||||
Vi,
|
||||
Vo,
|
||||
Vl,
|
||||
Tcs,
|
||||
Tci,
|
||||
Tco,
|
||||
Tcl,
|
||||
Tes,
|
||||
Tei,
|
||||
Teo,
|
||||
Tel,
|
||||
Gsp,
|
||||
Gi,
|
||||
Go,
|
||||
Gl,
|
||||
Fs,
|
||||
Fi,
|
||||
Fo,
|
||||
Fl,
|
||||
Rp>,
|
||||
pipeline_layout: L)
|
||||
-> Result<GraphicsPipeline<Vdef, L, Rp>, GraphicsPipelineCreationError>
|
||||
where Vdef: VertexDefinition<Vi>,
|
||||
@ -461,7 +664,7 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
Gl: PipelineLayoutDescNames,
|
||||
Tcl: PipelineLayoutDescNames,
|
||||
Tel: PipelineLayoutDescNames,
|
||||
Rp: RenderPassAbstract + RenderPassDesc + RenderPassSubpassInterface<Fo>,
|
||||
Rp: RenderPassAbstract + RenderPassDesc + RenderPassSubpassInterface<Fo>
|
||||
{
|
||||
let vk = device.pointers();
|
||||
|
||||
@ -472,14 +675,14 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
|
||||
params.fragment_shader.layout())?;
|
||||
if let Some(ref geometry_shader) = params.geometry_shader {
|
||||
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
|
||||
geometry_shader.layout())?;
|
||||
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout, geometry_shader.layout())?;
|
||||
}
|
||||
if let Some(ref tess) = params.tessellation {
|
||||
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
|
||||
tess.tessellation_control_shader.layout())?;
|
||||
PipelineLayoutSuperset::ensure_superset_of(&pipeline_layout,
|
||||
tess.tessellation_evaluation_shader.layout())?;
|
||||
tess.tessellation_evaluation_shader
|
||||
.layout())?;
|
||||
}
|
||||
|
||||
// Check that the subpass can accept the output of the fragment shader.
|
||||
@ -555,7 +758,9 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
pNext: ptr::null(),
|
||||
flags: 0, // reserved
|
||||
stage: vk::SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
|
||||
module: tess.tessellation_evaluation_shader.module().internal_object(),
|
||||
module: tess.tessellation_evaluation_shader
|
||||
.module()
|
||||
.internal_object(),
|
||||
pName: tess.tessellation_evaluation_shader.name().as_ptr(),
|
||||
pSpecializationInfo: ptr::null(), // TODO:
|
||||
});
|
||||
@ -566,11 +771,19 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
|
||||
// Vertex bindings.
|
||||
let (binding_descriptions, attribute_descriptions) = {
|
||||
let (buffers_iter, attribs_iter) = try!(params.vertex_input.definition(params.vertex_shader.input_definition()));
|
||||
let (buffers_iter, attribs_iter) =
|
||||
params
|
||||
.vertex_input
|
||||
.definition(params.vertex_shader.input_definition())?;
|
||||
|
||||
let mut binding_descriptions = SmallVec::<[_; 8]>::new();
|
||||
for (num, stride, rate) in buffers_iter {
|
||||
if stride > device.physical_device().limits().max_vertex_input_binding_stride() as usize {
|
||||
if stride >
|
||||
device
|
||||
.physical_device()
|
||||
.limits()
|
||||
.max_vertex_input_binding_stride() as usize
|
||||
{
|
||||
return Err(GraphicsPipelineCreationError::MaxVertexInputBindingStrideExceeded {
|
||||
binding: num as usize,
|
||||
max: device.physical_device().limits().max_vertex_input_binding_stride() as usize,
|
||||
@ -589,14 +802,22 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
for (loc, binding, info) in attribs_iter {
// TODO: check attribute format support

if info.offset > device.physical_device().limits().max_vertex_input_attribute_offset() as usize {
if info.offset >
device
.physical_device()
.limits()
.max_vertex_input_attribute_offset() as usize
{
return Err(GraphicsPipelineCreationError::MaxVertexInputAttributeOffsetExceeded {
max: device.physical_device().limits().max_vertex_input_attribute_offset() as usize,
obtained: info.offset,
});
}

debug_assert!(binding_descriptions.iter().find(|b| b.binding == binding).is_some());
debug_assert!(binding_descriptions
.iter()
.find(|b| b.binding == binding)
.is_some());

attribute_descriptions.push(vk::VertexInputAttributeDescription {
location: loc as u32,
@ -609,20 +830,34 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
(binding_descriptions, attribute_descriptions)
};

if binding_descriptions.len() > device.physical_device().limits()
if binding_descriptions.len() >
device
.physical_device()
.limits()
.max_vertex_input_bindings() as usize
{
return Err(GraphicsPipelineCreationError::MaxVertexInputBindingsExceeded {
max: device.physical_device().limits().max_vertex_input_bindings() as usize,
max: device
.physical_device()
.limits()
.max_vertex_input_bindings() as
usize,
obtained: binding_descriptions.len(),
});
}

if attribute_descriptions.len() > device.physical_device().limits()
if attribute_descriptions.len() >
device
.physical_device()
.limits()
.max_vertex_input_attributes() as usize
{
return Err(GraphicsPipelineCreationError::MaxVertexInputAttributesExceeded {
max: device.physical_device().limits().max_vertex_input_attributes() as usize,
max: device
.physical_device()
.limits()
.max_vertex_input_attributes() as
usize,
obtained: attribute_descriptions.len(),
});
}
@ -641,7 +876,7 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
!params.input_assembly.topology.supports_primitive_restart()
{
return Err(GraphicsPipelineCreationError::PrimitiveDoesntSupportPrimitiveRestart {
primitive: params.input_assembly.topology
primitive: params.input_assembly.topology,
});
}

@ -669,7 +904,10 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
if params.tessellation.is_none() {
return Err(GraphicsPipelineCreationError::InvalidPrimitiveTopology);
}
if vertices_per_patch > device.physical_device().limits()
if vertices_per_patch >
device
.physical_device()
.limits()
.max_tessellation_patch_size()
{
return Err(GraphicsPipelineCreationError::MaxTessellationPatchSizeExceeded);
@ -688,25 +926,31 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
}

None
}
},
};

let (vp_vp, vp_sc, vp_num) = match params.viewport {
ViewportsState::Fixed { ref data } => (
data.iter().map(|e| e.0.clone().into()).collect::<SmallVec<[vk::Viewport; 4]>>(),
data.iter().map(|e| e.1.clone().into()).collect::<SmallVec<[vk::Rect2D; 4]>>(),
data.len() as u32
),
ViewportsState::Fixed { ref data } => (data.iter()
.map(|e| e.0.clone().into())
.collect::<SmallVec<[vk::Viewport; 4]>>(),
data.iter()
.map(|e| e.1.clone().into())
.collect::<SmallVec<[vk::Rect2D; 4]>>(),
data.len() as u32),
ViewportsState::DynamicViewports { ref scissors } => {
let num = scissors.len() as u32;
let scissors = scissors.iter().map(|e| e.clone().into())
let scissors = scissors
.iter()
.map(|e| e.clone().into())
.collect::<SmallVec<[vk::Rect2D; 4]>>();
dynamic_states.push(vk::DYNAMIC_STATE_VIEWPORT);
(SmallVec::new(), scissors, num)
},
ViewportsState::DynamicScissors { ref viewports } => {
let num = viewports.len() as u32;
let viewports = viewports.iter().map(|e| e.clone().into())
let viewports = viewports
.iter()
.map(|e| e.clone().into())
.collect::<SmallVec<[vk::Viewport; 4]>>();
dynamic_states.push(vk::DYNAMIC_STATE_SCISSOR);
(viewports, SmallVec::new(), num)
@ -750,9 +994,17 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
pNext: ptr::null(),
flags: 0, // reserved
viewportCount: vp_num,
pViewports: if vp_vp.is_empty() { ptr::null() } else { vp_vp.as_ptr() }, // validation layer crashes if you just pass the pointer
pViewports: if vp_vp.is_empty() {
ptr::null()
} else {
vp_vp.as_ptr()
}, // validation layer crashes if you just pass the pointer
scissorCount: vp_num,
pScissors: if vp_sc.is_empty() { ptr::null() } else { vp_sc.as_ptr() }, // validation layer crashes if you just pass the pointer
pScissors: if vp_sc.is_empty() {
ptr::null()
} else {
vp_sc.as_ptr()
}, // validation layer crashes if you just pass the pointer
};

if let Some(line_width) = params.raster.line_width {
@ -794,8 +1046,16 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
sType: vk::STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
depthClampEnable: if params.raster.depth_clamp { vk::TRUE } else { vk::FALSE },
rasterizerDiscardEnable: if params.raster.rasterizer_discard { vk::TRUE } else { vk::FALSE },
depthClampEnable: if params.raster.depth_clamp {
vk::TRUE
} else {
vk::FALSE
},
rasterizerDiscardEnable: if params.raster.rasterizer_discard {
vk::TRUE
} else {
vk::FALSE
},
polygonMode: params.raster.polygon_mode as u32,
cullMode: params.raster.cull_mode as u32,
frontFace: params.raster.front_face as u32,
@ -808,17 +1068,31 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>

assert!(params.multisample.rasterization_samples >= 1);
// FIXME: check that rasterization_samples is equal to what's in the renderpass
if let Some(s) = params.multisample.sample_shading { assert!(s >= 0.0 && s <= 1.0); }
if let Some(s) = params.multisample.sample_shading {
assert!(s >= 0.0 && s <= 1.0);
}
let multisample = vk::PipelineMultisampleStateCreateInfo {
sType: vk::STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
pNext: ptr::null(),
flags: 0, // reserved
rasterizationSamples: params.multisample.rasterization_samples,
sampleShadingEnable: if params.multisample.sample_shading.is_some() { vk::TRUE } else { vk::FALSE },
sampleShadingEnable: if params.multisample.sample_shading.is_some() {
vk::TRUE
} else {
vk::FALSE
},
minSampleShading: params.multisample.sample_shading.unwrap_or(1.0),
pSampleMask: ptr::null(), //params.multisample.sample_mask.as_ptr(), // FIXME:
alphaToCoverageEnable: if params.multisample.alpha_to_coverage { vk::TRUE } else { vk::FALSE },
alphaToOneEnable: if params.multisample.alpha_to_one { vk::TRUE } else { vk::FALSE },
alphaToCoverageEnable: if params.multisample.alpha_to_coverage {
vk::TRUE
} else {
vk::FALSE
},
alphaToOneEnable: if params.multisample.alpha_to_one {
vk::TRUE
} else {
vk::FALSE
},
};

let depth_stencil = {
@ -843,33 +1117,30 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
};

match (params.depth_stencil.stencil_front.compare_mask,
params.depth_stencil.stencil_back.compare_mask)
{
params.depth_stencil.stencil_back.compare_mask) {
(Some(_), Some(_)) => (),
(None, None) => {
dynamic_states.push(vk::DYNAMIC_STATE_STENCIL_COMPARE_MASK);
},
_ => return Err(GraphicsPipelineCreationError::WrongStencilState)
_ => return Err(GraphicsPipelineCreationError::WrongStencilState),
};

match (params.depth_stencil.stencil_front.write_mask,
params.depth_stencil.stencil_back.write_mask)
{
params.depth_stencil.stencil_back.write_mask) {
(Some(_), Some(_)) => (),
(None, None) => {
dynamic_states.push(vk::DYNAMIC_STATE_STENCIL_WRITE_MASK);
},
_ => return Err(GraphicsPipelineCreationError::WrongStencilState)
_ => return Err(GraphicsPipelineCreationError::WrongStencilState),
};

match (params.depth_stencil.stencil_front.reference,
params.depth_stencil.stencil_back.reference)
{
params.depth_stencil.stencil_back.reference) {
(Some(_), Some(_)) => (),
(None, None) => {
dynamic_states.push(vk::DYNAMIC_STATE_STENCIL_REFERENCE);
},
_ => return Err(GraphicsPipelineCreationError::WrongStencilState)
_ => return Err(GraphicsPipelineCreationError::WrongStencilState),
};

if params.depth_stencil.depth_write && !params.render_pass.has_writable_depth() {
|
||||
@ -897,21 +1168,40 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
flags: 0, // reserved
|
||||
depthTestEnable: if !params.depth_stencil.depth_write &&
|
||||
params.depth_stencil.depth_compare == Compare::Always
|
||||
{ vk::FALSE } else { vk::TRUE },
|
||||
depthWriteEnable: if params.depth_stencil.depth_write { vk::TRUE }
|
||||
else { vk::FALSE },
|
||||
{
|
||||
vk::FALSE
|
||||
} else {
|
||||
vk::TRUE
|
||||
},
|
||||
depthWriteEnable: if params.depth_stencil.depth_write {
|
||||
vk::TRUE
|
||||
} else {
|
||||
vk::FALSE
|
||||
},
|
||||
depthCompareOp: params.depth_stencil.depth_compare as u32,
|
||||
depthBoundsTestEnable: db.0,
|
||||
stencilTestEnable: if params.depth_stencil.stencil_front.always_keep() &&
|
||||
params.depth_stencil.stencil_back.always_keep()
|
||||
{ vk::FALSE } else { vk::TRUE },
|
||||
{
|
||||
vk::FALSE
|
||||
} else {
|
||||
vk::TRUE
|
||||
},
|
||||
front: vk::StencilOpState {
|
||||
failOp: params.depth_stencil.stencil_front.fail_op as u32,
|
||||
passOp: params.depth_stencil.stencil_front.pass_op as u32,
|
||||
depthFailOp: params.depth_stencil.stencil_front.depth_fail_op as u32,
|
||||
compareOp: params.depth_stencil.stencil_front.compare as u32,
|
||||
compareMask: params.depth_stencil.stencil_front.compare_mask.unwrap_or(u32::MAX),
|
||||
writeMask: params.depth_stencil.stencil_front.write_mask.unwrap_or(u32::MAX),
|
||||
compareMask: params
|
||||
.depth_stencil
|
||||
.stencil_front
|
||||
.compare_mask
|
||||
.unwrap_or(u32::MAX),
|
||||
writeMask: params
|
||||
.depth_stencil
|
||||
.stencil_front
|
||||
.write_mask
|
||||
.unwrap_or(u32::MAX),
|
||||
reference: params.depth_stencil.stencil_front.reference.unwrap_or(0),
|
||||
},
|
||||
back: vk::StencilOpState {
|
||||
@ -919,9 +1209,17 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
passOp: params.depth_stencil.stencil_back.pass_op as u32,
|
||||
depthFailOp: params.depth_stencil.stencil_back.depth_fail_op as u32,
|
||||
compareOp: params.depth_stencil.stencil_back.compare as u32,
|
||||
compareMask: params.depth_stencil.stencil_back.compare_mask.unwrap_or(u32::MAX),
|
||||
writeMask: params.depth_stencil.stencil_back.write_mask.unwrap_or(u32::MAX),
|
||||
reference: params.depth_stencil.stencil_back.reference.unwrap_or(0)
|
||||
compareMask: params
|
||||
.depth_stencil
|
||||
.stencil_back
|
||||
.compare_mask
|
||||
.unwrap_or(u32::MAX),
|
||||
writeMask: params
|
||||
.depth_stencil
|
||||
.stencil_back
|
||||
.write_mask
|
||||
.unwrap_or(u32::MAX),
|
||||
reference: params.depth_stencil.stencil_back.reference.unwrap_or(0),
|
||||
},
|
||||
minDepthBounds: db.1,
|
||||
maxDepthBounds: db.2,
|
||||
@ -969,7 +1267,7 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
} else {
|
||||
dynamic_states.push(vk::DYNAMIC_STATE_BLEND_CONSTANTS);
|
||||
[0.0, 0.0, 0.0, 0.0]
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
let dynamic_states = if !dynamic_states.is_empty() {
|
||||
@ -993,14 +1291,18 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
pStages: stages.as_ptr(),
|
||||
pVertexInputState: &vertex_input_state,
|
||||
pInputAssemblyState: &input_assembly,
|
||||
pTessellationState: tessellation.as_ref().map(|t| t as *const _)
|
||||
pTessellationState: tessellation
|
||||
.as_ref()
|
||||
.map(|t| t as *const _)
|
||||
.unwrap_or(ptr::null()),
|
||||
pViewportState: &viewport_info,
|
||||
pRasterizationState: &rasterization,
|
||||
pMultisampleState: &multisample,
|
||||
pDepthStencilState: &depth_stencil,
|
||||
pColorBlendState: &blend,
|
||||
pDynamicState: dynamic_states.as_ref().map(|s| s as *const _)
|
||||
pDynamicState: dynamic_states
|
||||
.as_ref()
|
||||
.map(|s| s as *const _)
|
||||
.unwrap_or(ptr::null()),
|
||||
layout: PipelineLayoutAbstract::sys(&pipeline_layout).internal_object(),
|
||||
renderPass: params.render_pass.render_pass().inner().internal_object(),
|
||||
@ -1010,8 +1312,12 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateGraphicsPipelines(device.internal_object(), 0,
|
||||
1, &infos, ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateGraphicsPipelines(device.internal_object(),
|
||||
0,
|
||||
1,
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -1034,7 +1340,11 @@ impl<Vdef, L, Rp> GraphicsPipeline<Vdef, L, Rp>
|
||||
dynamic_scissor: params.viewport.dynamic_scissors(),
|
||||
dynamic_depth_bias: params.raster.depth_bias.is_dynamic(),
|
||||
dynamic_depth_bounds: params.depth_stencil.depth_bounds_test.is_dynamic(),
|
||||
dynamic_stencil_compare_mask: params.depth_stencil.stencil_back.compare_mask.is_none(),
|
||||
dynamic_stencil_compare_mask: params
|
||||
.depth_stencil
|
||||
.stencil_back
|
||||
.compare_mask
|
||||
.is_none(),
|
||||
dynamic_stencil_write_mask: params.depth_stencil.stencil_back.write_mask.is_none(),
|
||||
dynamic_stencil_reference: params.depth_stencil.stencil_back.reference.is_none(),
|
||||
dynamic_blend_constants: params.blend.blend_constants.is_none(),
|
||||
@ -1148,7 +1458,9 @@ unsafe impl<Mv, L, Rp> PipelineLayoutAbstract for GraphicsPipeline<Mv, L, Rp>
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<Mv, L, Rp> PipelineLayoutDesc for GraphicsPipeline<Mv, L, Rp> where L: PipelineLayoutDesc {
|
||||
unsafe impl<Mv, L, Rp> PipelineLayoutDesc for GraphicsPipeline<Mv, L, Rp>
|
||||
where L: PipelineLayoutDesc
|
||||
{
|
||||
#[inline]
|
||||
fn num_sets(&self) -> usize {
|
||||
self.layout.num_sets()
|
||||
@ -1175,7 +1487,9 @@ unsafe impl<Mv, L, Rp> PipelineLayoutDesc for GraphicsPipeline<Mv, L, Rp> where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<Mv, L, Rp> PipelineLayoutDescNames for GraphicsPipeline<Mv, L, Rp> where L: PipelineLayoutDescNames {
|
||||
unsafe impl<Mv, L, Rp> PipelineLayoutDescNames for GraphicsPipeline<Mv, L, Rp>
|
||||
where L: PipelineLayoutDescNames
|
||||
{
|
||||
#[inline]
|
||||
fn descriptor_by_name(&self, name: &str) -> Option<(usize, usize)> {
|
||||
self.layout.descriptor_by_name(name)
|
||||
@ -1275,7 +1589,9 @@ pub unsafe trait GraphicsPipelineAbstract: PipelineLayoutAbstract + RenderPassAb
|
||||
}
|
||||
|
||||
unsafe impl<Mv, L, Rp> GraphicsPipelineAbstract for GraphicsPipeline<Mv, L, Rp>
|
||||
where L: PipelineLayoutAbstract, Rp: RenderPassAbstract, Mv: VertexSource<Vec<Arc<BufferAccess + Send + Sync>>>
|
||||
where L: PipelineLayoutAbstract,
|
||||
Rp: RenderPassAbstract,
|
||||
Mv: VertexSource<Vec<Arc<BufferAccess + Send + Sync>>>
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> GraphicsPipelineSys {
|
||||
@ -1284,7 +1600,8 @@ unsafe impl<Mv, L, Rp> GraphicsPipelineAbstract for GraphicsPipeline<Mv, L, Rp>
|
||||
}
|
||||
|
||||
unsafe impl<T> GraphicsPipelineAbstract for T
|
||||
where T: SafeDeref, T::Target: GraphicsPipelineAbstract
|
||||
where T: SafeDeref,
|
||||
T::Target: GraphicsPipelineAbstract
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> GraphicsPipelineSys {
|
||||
@ -1312,9 +1629,9 @@ unsafe impl<Mv, L, Rp, I> VertexDefinition<I> for GraphicsPipeline<Mv, L, Rp>
|
||||
type AttribsIter = <Mv as VertexDefinition<I>>::AttribsIter;
|
||||
|
||||
#[inline]
|
||||
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
|
||||
IncompatibleVertexDefinitionError>
|
||||
{
|
||||
fn definition(
|
||||
&self, interface: &I)
|
||||
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
|
||||
self.vertex_definition.definition(interface)
|
||||
}
|
||||
}
|
||||
@ -1407,7 +1724,7 @@ pub enum GraphicsPipelineCreationError {
|
||||
/// The user requested to use primitive restart, but the primitive topology doesn't support it.
|
||||
PrimitiveDoesntSupportPrimitiveRestart {
|
||||
/// The topology that doesn't support primitive restart.
|
||||
primitive: PrimitiveTopology
|
||||
primitive: PrimitiveTopology,
|
||||
},
|
||||
|
||||
/// The `multi_viewport` feature must be enabled in order to use multiple viewports at once.
|
||||
@ -1418,7 +1735,7 @@ pub enum GraphicsPipelineCreationError {
|
||||
/// Maximum allowed value.
|
||||
max: u32,
|
||||
/// Value that was passed.
|
||||
obtained: u32
|
||||
obtained: u32,
|
||||
},
|
||||
|
||||
/// The maximum dimensions of viewports has been exceeded.
|
||||
@ -1625,7 +1942,7 @@ impl error::Error for GraphicsPipelineCreationError {
|
||||
GraphicsPipelineCreationError::TessEvalFragmentStagesMismatch(ref err) => Some(err),
|
||||
GraphicsPipelineCreationError::GeometryFragmentStagesMismatch(ref err) => Some(err),
|
||||
GraphicsPipelineCreationError::IncompatibleVertexDefinition(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1668,7 +1985,7 @@ impl From<Error> for GraphicsPipelineCreationError {
|
||||
err @ Error::OutOfDeviceMemory => {
|
||||
GraphicsPipelineCreationError::OomError(OomError::from(err))
|
||||
},
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -11,6 +11,7 @@
|
||||
//!
|
||||
//! The input assembly is the stage where lists of vertices are turned into primitives.
|
||||
//!
|
||||
|
||||
use vk;
|
||||
|
||||
/// How the input assembly stage should behave.
|
||||
@ -67,10 +68,14 @@ impl Into<vk::PrimitiveTopology> for PrimitiveTopology {
|
||||
PrimitiveTopology::TriangleList => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
|
||||
PrimitiveTopology::TriangleStrip => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
|
||||
PrimitiveTopology::TriangleFan => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
|
||||
PrimitiveTopology::LineListWithAdjacency => vk::PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
|
||||
PrimitiveTopology::LineStripWithAdjacency => vk::PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
|
||||
PrimitiveTopology::TriangleListWithAdjacency => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
|
||||
PrimitiveTopology::TriangleStripWithAdjacency => vk::PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
|
||||
PrimitiveTopology::LineListWithAdjacency =>
|
||||
vk::PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
|
||||
PrimitiveTopology::LineStripWithAdjacency =>
|
||||
vk::PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY,
|
||||
PrimitiveTopology::TriangleListWithAdjacency =>
|
||||
vk::PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
|
||||
PrimitiveTopology::TriangleStripWithAdjacency =>
|
||||
vk::PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY,
|
||||
PrimitiveTopology::PatchList { .. } => vk::PRIMITIVE_TOPOLOGY_PATCH_LIST,
|
||||
}
|
||||
}
|
||||
@ -86,7 +91,7 @@ impl PrimitiveTopology {
|
||||
PrimitiveTopology::TriangleFan => true,
|
||||
PrimitiveTopology::LineStripWithAdjacency => true,
|
||||
PrimitiveTopology::TriangleStripWithAdjacency => true,
|
||||
_ => false
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -12,6 +12,7 @@
|
||||
//! The rasterization is the stage when collections of triangles are turned into collections
|
||||
//! of pixels or samples.
|
||||
//!
|
||||
|
||||
use vk;
|
||||
|
||||
/// State of the rasterizer.
|
||||
@ -71,7 +72,7 @@ impl DepthBiasControl {
|
||||
pub fn is_dynamic(&self) -> bool {
|
||||
match *self {
|
||||
DepthBiasControl::Dynamic => true,
|
||||
_ => false
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::error;
|
||||
use std::ffi::CStr;
|
||||
use std::fmt;
|
||||
use std::iter;
|
||||
use std::iter::Empty as EmptyIter;
|
||||
@ -27,16 +28,15 @@ use std::mem;
|
||||
use std::ops::Range;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
use std::ffi::CStr;
|
||||
|
||||
use format::Format;
|
||||
use pipeline::input_assembly::PrimitiveTopology;
|
||||
|
||||
use device::Device;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use SafeDeref;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use device::Device;
|
||||
use vk;
|
||||
|
||||
/// Contains SPIR-V code with one or more entry points.
|
||||
@ -44,14 +44,18 @@ use vk;
|
||||
/// Note that it is advised to wrap around a `ShaderModule` with a struct that is different for
|
||||
/// each shader.
|
||||
#[derive(Debug)]
|
||||
pub struct ShaderModule<P = Arc<Device>> where P: SafeDeref<Target = Device> {
|
||||
pub struct ShaderModule<P = Arc<Device>>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
// The module.
|
||||
module: vk::ShaderModule,
|
||||
// Pointer to the device.
|
||||
device: P,
|
||||
}
|
||||
|
||||
impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
impl<P> ShaderModule<P>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
/// Builds a new shader module from SPIR-V.
|
||||
///
|
||||
/// # Safety
|
||||
@ -74,8 +78,10 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
|
||||
let vk = device.pointers();
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateShaderModule(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateShaderModule(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -97,10 +103,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
/// - The input, output and layout must correctly describe the input, output and layout used
|
||||
/// by this stage.
|
||||
///
|
||||
pub unsafe fn vertex_shader_entry_point<'a, S, I, O, L>
|
||||
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> VertexShaderEntryPoint<'a, S, I, O, L, P>
|
||||
{
|
||||
pub unsafe fn vertex_shader_entry_point<'a, S, I, O, L>(
|
||||
&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> VertexShaderEntryPoint<'a, S, I, O, L, P> {
|
||||
VertexShaderEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -123,10 +128,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
/// - The input, output and layout must correctly describe the input, output and layout used
|
||||
/// by this stage.
|
||||
///
|
||||
pub unsafe fn tess_control_shader_entry_point<'a, S, I, O, L>
|
||||
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> TessControlShaderEntryPoint<'a, S, I, O, L, P>
|
||||
{
|
||||
pub unsafe fn tess_control_shader_entry_point<'a, S, I, O, L>(
|
||||
&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> TessControlShaderEntryPoint<'a, S, I, O, L, P> {
|
||||
TessControlShaderEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -149,10 +153,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
/// - The input, output and layout must correctly describe the input, output and layout used
|
||||
/// by this stage.
|
||||
///
|
||||
pub unsafe fn tess_evaluation_shader_entry_point<'a, S, I, O, L>
|
||||
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P>
|
||||
{
|
||||
pub unsafe fn tess_evaluation_shader_entry_point<'a, S, I, O, L>(
|
||||
&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> TessEvaluationShaderEntryPoint<'a, S, I, O, L, P> {
|
||||
TessEvaluationShaderEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -175,10 +178,10 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
/// - The input, output and layout must correctly describe the input, output and layout used
|
||||
/// by this stage.
|
||||
///
|
||||
pub unsafe fn geometry_shader_entry_point<'a, S, I, O, L>
|
||||
(&'a self, name: &'a CStr, primitives: GeometryShaderExecutionMode, input: I,
|
||||
output: O, layout: L) -> GeometryShaderEntryPoint<'a, S, I, O, L, P>
|
||||
{
|
||||
pub unsafe fn geometry_shader_entry_point<'a, S, I, O, L>(
|
||||
&'a self, name: &'a CStr, primitives: GeometryShaderExecutionMode, input: I, output: O,
|
||||
layout: L)
|
||||
-> GeometryShaderEntryPoint<'a, S, I, O, L, P> {
|
||||
GeometryShaderEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -202,10 +205,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
/// - The input, output and layout must correctly describe the input, output and layout used
|
||||
/// by this stage.
|
||||
///
|
||||
pub unsafe fn fragment_shader_entry_point<'a, S, I, O, L>
|
||||
(&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> FragmentShaderEntryPoint<'a, S, I, O, L, P>
|
||||
{
|
||||
pub unsafe fn fragment_shader_entry_point<'a, S, I, O, L>(
|
||||
&'a self, name: &'a CStr, input: I, output: O, layout: L)
|
||||
-> FragmentShaderEntryPoint<'a, S, I, O, L, P> {
|
||||
FragmentShaderEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -229,8 +231,7 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
///
|
||||
#[inline]
|
||||
pub unsafe fn compute_shader_entry_point<'a, S, L>(&'a self, name: &'a CStr, layout: L)
|
||||
-> ComputeShaderEntryPoint<'a, S, L, P>
|
||||
{
|
||||
-> ComputeShaderEntryPoint<'a, S, L, P> {
|
||||
ComputeShaderEntryPoint {
|
||||
module: self,
|
||||
name: name,
|
||||
@ -240,7 +241,9 @@ impl<P> ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<P> VulkanObject for ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
unsafe impl<P> VulkanObject for ShaderModule<P>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
type Object = vk::ShaderModule;
|
||||
|
||||
#[inline]
|
||||
@ -249,7 +252,9 @@ unsafe impl<P> VulkanObject for ShaderModule<P> where P: SafeDeref<Target = Devi
|
||||
}
|
||||
}
|
||||
|
||||
impl<P> Drop for ShaderModule<P> where P: SafeDeref<Target = Device> {
|
||||
impl<P> Drop for ShaderModule<P>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
@ -624,14 +629,17 @@ unsafe impl ShaderInterfaceDef for EmptyShaderInterfaceDef {
|
||||
|
||||
/// Extension trait for `ShaderInterfaceDef` that specifies that the interface is potentially
|
||||
/// compatible with another one.
|
||||
pub unsafe trait ShaderInterfaceDefMatch<I>: ShaderInterfaceDef where I: ShaderInterfaceDef {
|
||||
pub unsafe trait ShaderInterfaceDefMatch<I>: ShaderInterfaceDef
|
||||
where I: ShaderInterfaceDef
|
||||
{
|
||||
/// Returns `Ok` if the two definitions match.
|
||||
fn matches(&self, other: &I) -> Result<(), ShaderInterfaceMismatchError>;
|
||||
}
|
||||
|
||||
// TODO: turn this into a default impl that can be specialized
|
||||
unsafe impl<T, I> ShaderInterfaceDefMatch<I> for T
|
||||
where T: ShaderInterfaceDef, I: ShaderInterfaceDef
|
||||
where T: ShaderInterfaceDef,
|
||||
I: ShaderInterfaceDef
|
||||
{
|
||||
fn matches(&self, other: &I) -> Result<(), ShaderInterfaceMismatchError> {
|
||||
if self.elements().len() != other.elements().len() {
|
||||
@ -640,8 +648,12 @@ unsafe impl<T, I> ShaderInterfaceDefMatch<I> for T
|
||||
|
||||
for a in self.elements() {
|
||||
for loc in a.location.clone() {
|
||||
let b = match other.elements().find(|e| loc >= e.location.start && loc < e.location.end) {
|
||||
None => return Err(ShaderInterfaceMismatchError::MissingElement { location: loc }),
|
||||
let b = match other
|
||||
.elements()
|
||||
.find(|e| loc >= e.location.start && loc < e.location.end) {
|
||||
None => return Err(ShaderInterfaceMismatchError::MissingElement {
|
||||
location: loc,
|
||||
}),
|
||||
Some(b) => b,
|
||||
};
|
||||
|
||||
@ -674,11 +686,11 @@ impl error::Error for ShaderInterfaceMismatchError {
|
||||
#[inline]
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
ShaderInterfaceMismatchError::ElementsCountMismatch => "the number of elements \
|
||||
mismatches",
|
||||
ShaderInterfaceMismatchError::ElementsCountMismatch =>
|
||||
"the number of elements mismatches",
|
||||
ShaderInterfaceMismatchError::MissingElement { .. } => "an element is missing",
|
||||
ShaderInterfaceMismatchError::FormatMismatch => "the format of an element does not \
|
||||
match",
|
||||
ShaderInterfaceMismatchError::FormatMismatch =>
|
||||
"the format of an element does not match",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -11,14 +11,15 @@ use std::error;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use SafeDeref;
|
||||
use buffer::BufferAccess;
|
||||
use format::Format;
|
||||
use pipeline::vertex::VertexMemberTy;
|
||||
use SafeDeref;
|
||||
use vk;
|
||||
|
||||
/// Trait for types that describe the definition of the vertex input used by a graphics pipeline.
|
||||
pub unsafe trait VertexDefinition<I>: VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> {
|
||||
pub unsafe trait VertexDefinition<I>
|
||||
: VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> {
|
||||
/// Iterator that returns the offset, the stride (in bytes) and input rate of each buffer.
|
||||
type BuffersIter: ExactSizeIterator<Item = (u32, usize, InputRate)>;
|
||||
/// Iterator that returns the attribute location, buffer id, and infos.
|
||||
@ -26,18 +27,22 @@ pub unsafe trait VertexDefinition<I>: VertexSource<Vec<Arc<BufferAccess + Send +
|
||||
|
||||
/// Builds the vertex definition to use to link this definition to a vertex shader's input
|
||||
/// interface.
|
||||
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
|
||||
IncompatibleVertexDefinitionError>;
|
||||
fn definition(
|
||||
&self, interface: &I)
|
||||
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError>;
|
||||
}
|
||||
|
||||
unsafe impl<I, T> VertexDefinition<I> for T where T: SafeDeref, T::Target: VertexDefinition<I> {
|
||||
unsafe impl<I, T> VertexDefinition<I> for T
|
||||
where T: SafeDeref,
|
||||
T::Target: VertexDefinition<I>
|
||||
{
|
||||
type BuffersIter = <T::Target as VertexDefinition<I>>::BuffersIter;
|
||||
type AttribsIter = <T::Target as VertexDefinition<I>>::AttribsIter;
|
||||
|
||||
#[inline]
|
||||
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
|
||||
IncompatibleVertexDefinitionError>
|
||||
{
|
||||
fn definition(
|
||||
&self, interface: &I)
|
||||
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
|
||||
(**self).definition(interface)
|
||||
}
|
||||
}
|
||||
@ -111,7 +116,10 @@ pub unsafe trait VertexSource<L> {
|
||||
fn decode(&self, L) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize);
|
||||
}
|
||||
|
||||
unsafe impl<L, T> VertexSource<L> for T where T: SafeDeref, T::Target: VertexSource<L> {
|
||||
unsafe impl<L, T> VertexSource<L> for T
|
||||
where T: SafeDeref,
|
||||
T::Target: VertexSource<L>
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, list: L) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
(**self).decode(list)
|
||||
|
@ -28,18 +28,22 @@ pub struct OneVertexOneInstanceDefinition<T, U>(pub PhantomData<(T, U)>);
|
||||
|
||||
impl<T, U> OneVertexOneInstanceDefinition<T, U> {
|
||||
#[inline]
|
||||
pub fn new() -> OneVertexOneInstanceDefinition<T, U> { OneVertexOneInstanceDefinition(PhantomData) }
|
||||
pub fn new() -> OneVertexOneInstanceDefinition<T, U> {
|
||||
OneVertexOneInstanceDefinition(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, U, I> VertexDefinition<I> for OneVertexOneInstanceDefinition<T, U>
|
||||
where T: Vertex, U: Vertex, I: ShaderInterfaceDef
|
||||
where T: Vertex,
|
||||
U: Vertex,
|
||||
I: ShaderInterfaceDef
|
||||
{
|
||||
type BuffersIter = VecIntoIter<(u32, usize, InputRate)>;
|
||||
type AttribsIter = VecIntoIter<(u32, u32, AttributeInfo)>;
|
||||
|
||||
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
|
||||
IncompatibleVertexDefinitionError>
|
||||
{
|
||||
fn definition(
|
||||
&self, interface: &I)
|
||||
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
|
||||
let attrib = {
|
||||
let mut attribs = Vec::with_capacity(interface.elements().len());
|
||||
for e in interface.elements() {
|
||||
@ -51,23 +55,29 @@ unsafe impl<T, U, I> VertexDefinition<I> for OneVertexOneInstanceDefinition<T, U
|
||||
(infos, 1)
|
||||
} else {
|
||||
return Err(IncompatibleVertexDefinitionError::MissingAttribute {
|
||||
attribute: name.clone().into_owned()
|
||||
attribute: name.clone().into_owned(),
|
||||
});
|
||||
};
|
||||
|
||||
if !infos.ty.matches(infos.array_size, e.format,
|
||||
if !infos.ty.matches(infos.array_size,
|
||||
e.format,
|
||||
e.location.end - e.location.start)
|
||||
{
|
||||
return Err(IncompatibleVertexDefinitionError::FormatMismatch {
|
||||
attribute: name.clone().into_owned(),
|
||||
shader: (e.format, (e.location.end - e.location.start) as usize),
|
||||
definition: (infos.ty, infos.array_size),
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
let mut offset = infos.offset;
|
||||
for loc in e.location.clone() {
|
||||
attribs.push((loc, buf_offset, AttributeInfo { offset: offset, format: e.format }));
|
||||
attribs.push((loc,
|
||||
buf_offset,
|
||||
AttributeInfo {
|
||||
offset: offset,
|
||||
format: e.format,
|
||||
}));
|
||||
offset += e.format.size().unwrap();
|
||||
}
|
||||
}
|
||||
@ -76,18 +86,21 @@ unsafe impl<T, U, I> VertexDefinition<I> for OneVertexOneInstanceDefinition<T, U
|
||||
|
||||
let buffers = vec![
|
||||
(0, mem::size_of::<T>(), InputRate::Vertex),
|
||||
(1, mem::size_of::<U>(), InputRate::Instance)
|
||||
(1, mem::size_of::<U>(), InputRate::Instance),
|
||||
].into_iter();
|
||||
|
||||
Ok((buffers, attrib))
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for OneVertexOneInstanceDefinition<T, U>
|
||||
where T: Vertex, U: Vertex
|
||||
unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>>
|
||||
for OneVertexOneInstanceDefinition<T, U>
|
||||
where T: Vertex,
|
||||
U: Vertex
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>)
|
||||
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
// FIXME: safety
|
||||
assert_eq!(source.len(), 2);
|
||||
let len = source[0].size() / mem::size_of::<T>();
|
||||
@ -99,8 +112,10 @@ unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for OneVert
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, U, Bt, Bu> VertexSource<(Bt, Bu)> for OneVertexOneInstanceDefinition<T, U>
|
||||
where T: Vertex, Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
|
||||
U: Vertex, Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
|
||||
where T: Vertex,
|
||||
Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
|
||||
U: Vertex,
|
||||
Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, source: (Bt, Bu)) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
|
@ -28,18 +28,21 @@ pub struct SingleBufferDefinition<T>(pub PhantomData<T>);
|
||||
|
||||
impl<T> SingleBufferDefinition<T> {
|
||||
#[inline]
|
||||
pub fn new() -> SingleBufferDefinition<T> { SingleBufferDefinition(PhantomData) }
|
||||
pub fn new() -> SingleBufferDefinition<T> {
|
||||
SingleBufferDefinition(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, I> VertexDefinition<I> for SingleBufferDefinition<T>
|
||||
where T: Vertex, I: ShaderInterfaceDef
|
||||
where T: Vertex,
|
||||
I: ShaderInterfaceDef
|
||||
{
|
||||
type BuffersIter = OptionIntoIter<(u32, usize, InputRate)>;
|
||||
type AttribsIter = VecIntoIter<(u32, u32, AttributeInfo)>;
|
||||
|
||||
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
|
||||
IncompatibleVertexDefinitionError>
|
||||
{
|
||||
fn definition(
|
||||
&self, interface: &I)
|
||||
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
|
||||
let attrib = {
|
||||
let mut attribs = Vec::with_capacity(interface.elements().len());
|
||||
for e in interface.elements() {
|
||||
@ -48,23 +51,29 @@ unsafe impl<T, I> VertexDefinition<I> for SingleBufferDefinition<T>
|
||||
let infos = match <T as Vertex>::member(name) {
|
||||
Some(m) => m,
|
||||
None => return Err(IncompatibleVertexDefinitionError::MissingAttribute {
|
||||
attribute: name.clone().into_owned()
|
||||
})
|
||||
attribute: name.clone().into_owned(),
|
||||
}),
|
||||
};
|
||||
|
||||
if !infos.ty.matches(infos.array_size, e.format,
|
||||
if !infos.ty.matches(infos.array_size,
|
||||
e.format,
|
||||
e.location.end - e.location.start)
|
||||
{
|
||||
return Err(IncompatibleVertexDefinitionError::FormatMismatch {
|
||||
attribute: name.clone().into_owned(),
|
||||
shader: (e.format, (e.location.end - e.location.start) as usize),
|
||||
definition: (infos.ty, infos.array_size),
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
let mut offset = infos.offset;
|
||||
for loc in e.location.clone() {
|
||||
attribs.push((loc, 0, AttributeInfo { offset: offset, format: e.format }));
|
||||
attribs.push((loc,
|
||||
0,
|
||||
AttributeInfo {
|
||||
offset: offset,
|
||||
format: e.format,
|
||||
}));
|
||||
offset += e.format.size().unwrap();
|
||||
}
|
||||
}
|
||||
@ -80,7 +89,8 @@ unsafe impl<V> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for SingleBuff
|
||||
where V: Vertex
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
fn decode(&self, mut source: Vec<Arc<BufferAccess + Send + Sync>>)
|
||||
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
// FIXME: safety
|
||||
assert_eq!(source.len(), 1);
|
||||
let len = source[0].size() / mem::size_of::<V>();
|
||||
@ -89,7 +99,8 @@ unsafe impl<V> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for SingleBuff
|
||||
}
|
||||
|
||||
unsafe impl<'a, B, V> VertexSource<B> for SingleBufferDefinition<V>
|
||||
where B: TypedBufferAccess<Content = [V]> + Send + Sync + 'static, V: Vertex
|
||||
where B: TypedBufferAccess<Content = [V]> + Send + Sync + 'static,
|
||||
V: Vertex
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, source: B) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
|
@ -28,18 +28,22 @@ pub struct TwoBuffersDefinition<T, U>(pub PhantomData<(T, U)>);
|
||||
|
||||
impl<T, U> TwoBuffersDefinition<T, U> {
|
||||
#[inline]
|
||||
pub fn new() -> TwoBuffersDefinition<T, U> { TwoBuffersDefinition(PhantomData) }
|
||||
pub fn new() -> TwoBuffersDefinition<T, U> {
|
||||
TwoBuffersDefinition(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
|
||||
where T: Vertex, U: Vertex, I: ShaderInterfaceDef
|
||||
where T: Vertex,
|
||||
U: Vertex,
|
||||
I: ShaderInterfaceDef
|
||||
{
|
||||
type BuffersIter = VecIntoIter<(u32, usize, InputRate)>;
|
||||
type AttribsIter = VecIntoIter<(u32, u32, AttributeInfo)>;
|
||||
|
||||
fn definition(&self, interface: &I) -> Result<(Self::BuffersIter, Self::AttribsIter),
|
||||
IncompatibleVertexDefinitionError>
|
||||
{
|
||||
fn definition(
|
||||
&self, interface: &I)
|
||||
-> Result<(Self::BuffersIter, Self::AttribsIter), IncompatibleVertexDefinitionError> {
|
||||
let attrib = {
|
||||
let mut attribs = Vec::with_capacity(interface.elements().len());
|
||||
for e in interface.elements() {
|
||||
@ -51,23 +55,29 @@ unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
|
||||
(infos, 1)
|
||||
} else {
|
||||
return Err(IncompatibleVertexDefinitionError::MissingAttribute {
|
||||
attribute: name.clone().into_owned()
|
||||
attribute: name.clone().into_owned(),
|
||||
});
|
||||
};
|
||||
|
||||
if !infos.ty.matches(infos.array_size, e.format,
|
||||
if !infos.ty.matches(infos.array_size,
|
||||
e.format,
|
||||
e.location.end - e.location.start)
|
||||
{
|
||||
return Err(IncompatibleVertexDefinitionError::FormatMismatch {
|
||||
attribute: name.clone().into_owned(),
|
||||
shader: (e.format, (e.location.end - e.location.start) as usize),
|
||||
definition: (infos.ty, infos.array_size),
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
let mut offset = infos.offset;
|
||||
for loc in e.location.clone() {
|
||||
attribs.push((loc, buf_offset, AttributeInfo { offset: offset, format: e.format }));
|
||||
attribs.push((loc,
|
||||
buf_offset,
|
||||
AttributeInfo {
|
||||
offset: offset,
|
||||
format: e.format,
|
||||
}));
|
||||
offset += e.format.size().unwrap();
|
||||
}
|
||||
}
|
||||
@ -76,7 +86,7 @@ unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
|
||||
|
||||
let buffers = vec![
|
||||
(0, mem::size_of::<T>(), InputRate::Vertex),
|
||||
(1, mem::size_of::<U>(), InputRate::Vertex)
|
||||
(1, mem::size_of::<U>(), InputRate::Vertex),
|
||||
].into_iter();
|
||||
|
||||
Ok((buffers, attrib))
|
||||
@ -84,21 +94,29 @@ unsafe impl<T, U, I> VertexDefinition<I> for TwoBuffersDefinition<T, U>
|
||||
}
|
||||
|
||||
unsafe impl<T, U> VertexSource<Vec<Arc<BufferAccess + Send + Sync>>> for TwoBuffersDefinition<T, U>
|
||||
where T: Vertex, U: Vertex
|
||||
where T: Vertex,
|
||||
U: Vertex
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, source: Vec<Arc<BufferAccess + Send + Sync>>) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
fn decode(&self, source: Vec<Arc<BufferAccess + Send + Sync>>)
|
||||
-> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
unimplemented!() // FIXME: implement
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, U, Bt, Bu> VertexSource<(Bt, Bu)> for TwoBuffersDefinition<T, U>
|
||||
where T: Vertex, Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
|
||||
U: Vertex, Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
|
||||
where T: Vertex,
|
||||
Bt: TypedBufferAccess<Content = [T]> + Send + Sync + 'static,
|
||||
U: Vertex,
|
||||
Bu: TypedBufferAccess<Content = [U]> + Send + Sync + 'static
|
||||
{
|
||||
#[inline]
|
||||
fn decode(&self, source: (Bt, Bu)) -> (Vec<Box<BufferAccess + Send + Sync>>, usize, usize) {
|
||||
let vertices = [source.0.len(), source.1.len()].iter().cloned().min().unwrap();
|
||||
let vertices = [source.0.len(), source.1.len()]
|
||||
.iter()
|
||||
.cloned()
|
||||
.min()
|
||||
.unwrap();
|
||||
(vec![Box::new(source.0) as Box<_>, Box::new(source.1) as Box<_>], vertices, 1)
|
||||
}
|
||||
}
|
||||
|
@ -47,6 +47,7 @@
|
||||
//!
|
||||
//! In all cases the number of viewports and scissor boxes must be the same.
|
||||
//!
|
||||
|
||||
use std::ops::Range;
|
||||
use vk;
|
||||
|
||||
|
@ -21,24 +21,27 @@ use std::sync::Arc;
|
||||
|
||||
use device::Device;
|
||||
|
||||
use check_errors;
|
||||
use Error;
|
||||
use OomError;
|
||||
use SafeDeref;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
pub struct UnsafeQueryPool<P = Arc<Device>> where P: SafeDeref<Target = Device> {
|
||||
pub struct UnsafeQueryPool<P = Arc<Device>>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
pool: vk::QueryPool,
|
||||
device: P,
|
||||
num_slots: u32,
|
||||
}
|
||||
|
||||
impl<P> UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
|
||||
impl<P> UnsafeQueryPool<P>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
/// Builds a new query pool.
|
||||
pub fn new(device: P, ty: QueryType, num_slots: u32)
|
||||
-> Result<UnsafeQueryPool<P>, QueryPoolCreationError>
|
||||
{
|
||||
-> Result<UnsafeQueryPool<P>, QueryPoolCreationError> {
|
||||
let (vk_ty, statistics) = match ty {
|
||||
QueryType::Occlusion => (vk::QUERY_TYPE_OCCLUSION, 0),
|
||||
QueryType::Timestamp => (vk::QUERY_TYPE_TIMESTAMP, 0),
|
||||
@ -63,8 +66,10 @@ impl<P> UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
let vk = device.pointers();
|
||||
try!(check_errors(vk.CreateQueryPool(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateQueryPool(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -169,7 +174,9 @@ impl Into<vk::QueryPipelineStatisticFlags> for QueryPipelineStatisticFlags {
|
||||
}
|
||||
}
|
||||
|
||||
impl<P> Drop for UnsafeQueryPool<P> where P: SafeDeref<Target = Device> {
|
||||
impl<P> Drop for UnsafeQueryPool<P>
|
||||
where P: SafeDeref<Target = Device>
|
||||
{
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
@ -204,7 +211,7 @@ impl error::Error for QueryPoolCreationError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
QueryPoolCreationError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -229,7 +236,7 @@ impl From<Error> for QueryPoolCreationError {
|
||||
match err {
|
||||
err @ Error::OutOfHostMemory => QueryPoolCreationError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => QueryPoolCreationError::OomError(OomError::from(err)),
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -240,9 +247,7 @@ pub struct OcclusionQueriesPool {
|
||||
|
||||
impl OcclusionQueriesPool {
|
||||
/// See the docs of new().
|
||||
pub fn raw(device: Arc<Device>, num_slots: u32)
|
||||
-> Result<OcclusionQueriesPool, OomError>
|
||||
{
|
||||
pub fn raw(device: Arc<Device>, num_slots: u32) -> Result<OcclusionQueriesPool, OomError> {
|
||||
Ok(OcclusionQueriesPool {
|
||||
inner: match UnsafeQueryPool::new(device, QueryType::Occlusion, num_slots) {
|
||||
Ok(q) => q,
|
||||
@ -250,7 +255,7 @@ impl OcclusionQueriesPool {
|
||||
Err(QueryPoolCreationError::PipelineStatisticsQueryFeatureNotEnabled) => {
|
||||
unreachable!()
|
||||
},
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@ -261,9 +266,7 @@ impl OcclusionQueriesPool {
|
||||
/// - Panics if the device or host ran out of memory.
|
||||
///
|
||||
#[inline]
|
||||
pub fn new(device: Arc<Device>, num_slots: u32)
|
||||
-> Arc<OcclusionQueriesPool>
|
||||
{
|
||||
pub fn new(device: Arc<Device>, num_slots: u32) -> Arc<OcclusionQueriesPool> {
|
||||
Arc::new(OcclusionQueriesPool::raw(device, num_slots).unwrap())
|
||||
}
|
||||
|
||||
@ -301,7 +304,7 @@ mod tests {
|
||||
let ty = QueryType::PipelineStatistics(QueryPipelineStatisticFlags::none());
|
||||
match UnsafeQueryPool::new(device, ty, 256) {
|
||||
Err(QueryPoolCreationError::PipelineStatisticsQueryFeatureNotEnabled) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -61,17 +61,18 @@
|
||||
//! Samplers that don't use `ClampToBorder` are not concerned by these restrictions.
|
||||
//!
|
||||
// FIXME: restrictions aren't checked yet
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use device::Device;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use device::Device;
|
||||
use vk;
|
||||
|
||||
pub use pipeline::depth_stencil::Compare;
|
||||
@ -99,9 +100,18 @@ impl Sampler {
|
||||
///
|
||||
#[inline]
|
||||
pub fn simple_repeat_linear(device: Arc<Device>) -> Arc<Sampler> {
|
||||
Sampler::new(device, Filter::Linear, Filter::Linear, MipmapMode::Linear,
|
||||
SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
|
||||
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 1_000.0).unwrap()
|
||||
Sampler::new(device,
|
||||
Filter::Linear,
|
||||
Filter::Linear,
|
||||
MipmapMode::Linear,
|
||||
SamplerAddressMode::Repeat,
|
||||
SamplerAddressMode::Repeat,
|
||||
SamplerAddressMode::Repeat,
|
||||
0.0,
|
||||
1.0,
|
||||
0.0,
|
||||
1_000.0)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Shortcut for creating a sampler with linear sampling, that only uses the main level of
|
||||
@ -115,9 +125,18 @@ impl Sampler {
|
||||
///
|
||||
#[inline]
|
||||
pub fn simple_repeat_linear_no_mipmap(device: Arc<Device>) -> Arc<Sampler> {
|
||||
Sampler::new(device, Filter::Linear, Filter::Linear, MipmapMode::Nearest,
|
||||
SamplerAddressMode::Repeat, SamplerAddressMode::Repeat,
|
||||
SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 1.0).unwrap()
|
||||
Sampler::new(device,
|
||||
Filter::Linear,
|
||||
Filter::Linear,
|
||||
MipmapMode::Nearest,
|
||||
SamplerAddressMode::Repeat,
|
||||
SamplerAddressMode::Repeat,
|
||||
SamplerAddressMode::Repeat,
|
||||
0.0,
|
||||
1.0,
|
||||
0.0,
|
||||
1.0)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Creates a new `Sampler` with the given behavior.
|
||||
@ -150,10 +169,19 @@ impl Sampler {
|
||||
mipmap_mode: MipmapMode, address_u: SamplerAddressMode,
|
||||
address_v: SamplerAddressMode, address_w: SamplerAddressMode, mip_lod_bias: f32,
|
||||
max_anisotropy: f32, min_lod: f32, max_lod: f32)
|
||||
-> Result<Arc<Sampler>, SamplerCreationError>
|
||||
{
|
||||
Sampler::new_impl(device, mag_filter, min_filter, mipmap_mode, address_u, address_v,
|
||||
address_w, mip_lod_bias, max_anisotropy, min_lod, max_lod, None)
|
||||
-> Result<Arc<Sampler>, SamplerCreationError> {
|
||||
Sampler::new_impl(device,
|
||||
mag_filter,
|
||||
min_filter,
|
||||
mipmap_mode,
|
||||
address_u,
|
||||
address_v,
|
||||
address_w,
|
||||
mip_lod_bias,
|
||||
max_anisotropy,
|
||||
min_lod,
|
||||
max_lod,
|
||||
None)
|
||||
}
|
||||
|
||||
/// Creates a new `Sampler` with the given behavior.
|
||||
@ -175,20 +203,29 @@ impl Sampler {
|
||||
#[inline(always)]
|
||||
pub fn compare(device: Arc<Device>, mag_filter: Filter, min_filter: Filter,
|
||||
mipmap_mode: MipmapMode, address_u: SamplerAddressMode,
|
||||
address_v: SamplerAddressMode, address_w: SamplerAddressMode, mip_lod_bias: f32,
|
||||
max_anisotropy: f32, min_lod: f32, max_lod: f32, compare: Compare)
|
||||
-> Result<Arc<Sampler>, SamplerCreationError>
|
||||
{
|
||||
Sampler::new_impl(device, mag_filter, min_filter, mipmap_mode, address_u, address_v,
|
||||
address_w, mip_lod_bias, max_anisotropy, min_lod, max_lod, Some(compare))
|
||||
address_v: SamplerAddressMode, address_w: SamplerAddressMode,
|
||||
mip_lod_bias: f32, max_anisotropy: f32, min_lod: f32, max_lod: f32,
|
||||
compare: Compare)
|
||||
-> Result<Arc<Sampler>, SamplerCreationError> {
|
||||
Sampler::new_impl(device,
|
||||
mag_filter,
|
||||
min_filter,
|
||||
mipmap_mode,
|
||||
address_u,
|
||||
address_v,
|
||||
address_w,
|
||||
mip_lod_bias,
|
||||
max_anisotropy,
|
||||
min_lod,
|
||||
max_lod,
|
||||
Some(compare))
|
||||
}
|
||||
|
||||
fn new_impl(device: Arc<Device>, mag_filter: Filter, min_filter: Filter,
|
||||
mipmap_mode: MipmapMode, address_u: SamplerAddressMode,
|
||||
address_v: SamplerAddressMode, address_w: SamplerAddressMode, mip_lod_bias: f32,
|
||||
max_anisotropy: f32, min_lod: f32, max_lod: f32, compare: Option<Compare>)
|
||||
-> Result<Arc<Sampler>, SamplerCreationError>
|
||||
{
|
||||
-> Result<Arc<Sampler>, SamplerCreationError> {
|
||||
assert!(max_anisotropy >= 1.0);
|
||||
assert!(min_lod <= max_lod);
|
||||
|
||||
@ -221,7 +258,8 @@ impl Sampler {
|
||||
// Check MirrorClampToEdge extension support
|
||||
if [address_u, address_v, address_w]
|
||||
.iter()
|
||||
.any(|&mode| mode == SamplerAddressMode::MirrorClampToEdge) {
|
||||
.any(|&mode| mode == SamplerAddressMode::MirrorClampToEdge)
|
||||
{
|
||||
if !device.loaded_extensions().khr_sampler_mirror_clamp_to_edge {
|
||||
return Err(SamplerCreationError::SamplerMirrorClampToEdgeExtensionNotEnabled);
|
||||
}
|
||||
@ -230,12 +268,18 @@ impl Sampler {
|
||||
// Handling border color.
|
||||
let border_color = address_u.border_color();
|
||||
let border_color = match (border_color, address_v.border_color()) {
|
||||
(Some(b1), Some(b2)) => { assert_eq!(b1, b2); Some(b1) },
|
||||
(Some(b1), Some(b2)) => {
|
||||
assert_eq!(b1, b2);
|
||||
Some(b1)
|
||||
},
|
||||
(None, b) => b,
|
||||
(b, None) => b,
|
||||
};
|
||||
let border_color = match (border_color, address_w.border_color()) {
|
||||
(Some(b1), Some(b2)) => { assert_eq!(b1, b2); Some(b1) },
|
||||
(Some(b1), Some(b2)) => {
|
||||
assert_eq!(b1, b2);
|
||||
Some(b1)
|
||||
},
|
||||
(None, b) => b,
|
||||
(b, None) => b,
|
||||
};
|
||||
@ -253,9 +297,17 @@ impl Sampler {
|
||||
addressModeV: address_v.to_vk(),
|
||||
addressModeW: address_w.to_vk(),
|
||||
mipLodBias: mip_lod_bias,
|
||||
anisotropyEnable: if max_anisotropy > 1.0 { vk::TRUE } else { vk::FALSE },
|
||||
anisotropyEnable: if max_anisotropy > 1.0 {
|
||||
vk::TRUE
|
||||
} else {
|
||||
vk::FALSE
|
||||
},
|
||||
maxAnisotropy: max_anisotropy,
|
||||
compareEnable: if compare.is_some() { vk::TRUE } else { vk:: FALSE },
|
||||
compareEnable: if compare.is_some() {
|
||||
vk::TRUE
|
||||
} else {
|
||||
vk::FALSE
|
||||
},
|
||||
compareOp: compare.map(|c| c as u32).unwrap_or(0),
|
||||
minLod: min_lod,
|
||||
maxLod: max_lod,
|
||||
@ -264,8 +316,10 @@ impl Sampler {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateSampler(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateSampler(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -281,7 +335,8 @@ impl Sampler {
|
||||
Some(_) => false,
|
||||
None => true,
|
||||
},
|
||||
usable_with_int_formats: compare.is_none() && match border_color {
|
||||
usable_with_int_formats: compare.is_none() &&
|
||||
match border_color {
|
||||
Some(BorderColor::IntTransparentBlack) => true,
|
||||
Some(BorderColor::IntOpaqueBlack) => true,
|
||||
Some(BorderColor::IntOpaqueWhite) => true,
|
||||
@ -312,13 +367,15 @@ impl Sampler {
|
||||
pub fn unnormalized(device: Arc<Device>, filter: Filter,
|
||||
address_u: UnnormalizedSamplerAddressMode,
|
||||
address_v: UnnormalizedSamplerAddressMode)
|
||||
-> Result<Arc<Sampler>, SamplerCreationError>
|
||||
{
|
||||
-> Result<Arc<Sampler>, SamplerCreationError> {
|
||||
let vk = device.pointers();
|
||||
|
||||
let border_color = address_u.border_color();
|
||||
let border_color = match (border_color, address_v.border_color()) {
|
||||
(Some(b1), Some(b2)) => { assert_eq!(b1, b2); Some(b1) },
|
||||
(Some(b1), Some(b2)) => {
|
||||
assert_eq!(b1, b2);
|
||||
Some(b1)
|
||||
},
|
||||
(None, b) => b,
|
||||
(b, None) => b,
|
||||
};
|
||||
@ -346,8 +403,10 @@ impl Sampler {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateSampler(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateSampler(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -507,7 +566,7 @@ impl SamplerAddressMode {
|
||||
fn border_color(self) -> Option<BorderColor> {
|
||||
match self {
|
||||
SamplerAddressMode::ClampToBorder(c) => Some(c),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -598,7 +657,7 @@ pub enum SamplerCreationError {
|
||||
/// The value that was requested.
|
||||
requested: f32,
|
||||
/// The maximum supported value.
|
||||
maximum: f32
|
||||
maximum: f32,
|
||||
},
|
||||
|
||||
/// The requested mip lod bias exceeds the device's limits.
@ -606,7 +665,7 @@ pub enum SamplerCreationError {
/// The value that was requested.
requested: f32,
/// The maximum supported value.
maximum: f32
maximum: f32,
},

/// Using `MirrorClampToEdge` requires enabling the `VK_KHR_sampler_mirror_clamp_to_edge`
@ -620,8 +679,8 @@ impl error::Error for SamplerCreationError {
match *self {
SamplerCreationError::OomError(_) => "not enough memory available",
SamplerCreationError::TooManyObjects => "too many simultaneous sampler objects",
SamplerCreationError::SamplerAnisotropyFeatureNotEnabled => "the `sampler_anisotropy` \
feature is not enabled",
SamplerCreationError::SamplerAnisotropyFeatureNotEnabled =>
"the `sampler_anisotropy` feature is not enabled",
SamplerCreationError::AnisotropyLimitExceeded { .. } => "anisotropy limit exceeded",
SamplerCreationError::MipLodBiasLimitExceeded { .. } => "mip lod bias limit exceeded",
SamplerCreationError::SamplerMirrorClampToEdgeExtensionNotEnabled =>
@ -633,7 +692,7 @@ impl error::Error for SamplerCreationError {
fn cause(&self) -> Option<&error::Error> {
match *self {
SamplerCreationError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -659,7 +718,7 @@ impl From<Error> for SamplerCreationError {
err @ Error::OutOfHostMemory => SamplerCreationError::OomError(OomError::from(err)),
err @ Error::OutOfDeviceMemory => SamplerCreationError::OomError(OomError::from(err)),
Error::TooManyObjects => SamplerCreationError::TooManyObjects,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}
@ -672,12 +731,18 @@ mod tests {
fn create_regular() {
let (device, queue) = gfx_dev_and_queue!();

let s = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let s = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 1.0,
0.0, 2.0).unwrap();
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
0.0,
2.0)
.unwrap();
assert!(!s.compare_mode());
assert!(!s.is_unnormalized());
}
@ -686,12 +751,19 @@ mod tests {
fn create_compare() {
let (device, queue) = gfx_dev_and_queue!();

let s = sampler::Sampler::compare(device, sampler::Filter::Linear, sampler::Filter::Linear,
let s = sampler::Sampler::compare(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 1.0,
0.0, 2.0, sampler::Compare::Less).unwrap();
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
0.0,
2.0,
sampler::Compare::Less)
.unwrap();

assert!(s.compare_mode());
assert!(!s.is_unnormalized());
@ -701,7 +773,9 @@ mod tests {
fn create_unnormalized() {
let (device, queue) = gfx_dev_and_queue!();

let s = sampler::Sampler::unnormalized(device, sampler::Filter::Linear,
let s =
sampler::Sampler::unnormalized(device,
sampler::Filter::Linear,
sampler::UnnormalizedSamplerAddressMode::ClampToEdge,
sampler::UnnormalizedSamplerAddressMode::ClampToEdge)
.unwrap();
@ -727,11 +801,17 @@ mod tests {
fn min_lod_inferior() {
let (device, queue) = gfx_dev_and_queue!();

let _ = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let _ = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 1.0, 5.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
5.0,
2.0);
}

#[test]
@ -739,11 +819,17 @@ mod tests {
fn max_anisotropy() {
let (device, queue) = gfx_dev_and_queue!();

let _ = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let _ = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 0.5, 0.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
0.5,
0.0,
2.0);
}

#[test]
@ -754,26 +840,38 @@ mod tests {
let b1 = sampler::BorderColor::IntTransparentBlack;
let b2 = sampler::BorderColor::FloatOpaqueWhite;

let _ = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let _ = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::ClampToBorder(b1),
sampler::SamplerAddressMode::ClampToBorder(b2),
sampler::SamplerAddressMode::Repeat, 1.0, 1.0, 5.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
1.0,
5.0,
2.0);
}

#[test]
fn anisotropy_feature() {
let (device, queue) = gfx_dev_and_queue!();

let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 2.0, 0.0, 2.0);
sampler::SamplerAddressMode::Repeat,
1.0,
2.0,
0.0,
2.0);

match r {
Err(sampler::SamplerCreationError::SamplerAnisotropyFeatureNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
}

@ -781,16 +879,21 @@ mod tests {
fn anisotropy_limit() {
let (device, queue) = gfx_dev_and_queue!(sampler_anisotropy);

let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 1.0, 100000000.0, 0.0,
sampler::SamplerAddressMode::Repeat,
1.0,
100000000.0,
0.0,
2.0);

match r {
Err(sampler::SamplerCreationError::AnisotropyLimitExceeded { .. }) => (),
_ => panic!()
_ => panic!(),
}
}

@ -798,16 +901,21 @@ mod tests {
fn mip_lod_bias_limit() {
let (device, queue) = gfx_dev_and_queue!();

let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat,
sampler::SamplerAddressMode::Repeat, 100000000.0, 1.0, 0.0,
sampler::SamplerAddressMode::Repeat,
100000000.0,
1.0,
0.0,
2.0);

match r {
Err(sampler::SamplerCreationError::MipLodBiasLimitExceeded { .. }) => (),
_ => panic!()
_ => panic!(),
}
}

@ -815,16 +923,21 @@ mod tests {
fn sampler_mirror_clamp_to_edge_extension() {
let (device, queue) = gfx_dev_and_queue!();

let r = sampler::Sampler::new(device, sampler::Filter::Linear, sampler::Filter::Linear,
let r = sampler::Sampler::new(device,
sampler::Filter::Linear,
sampler::Filter::Linear,
sampler::MipmapMode::Nearest,
sampler::SamplerAddressMode::MirrorClampToEdge,
sampler::SamplerAddressMode::MirrorClampToEdge,
sampler::SamplerAddressMode::MirrorClampToEdge, 1.0, 1.0,
0.0, 2.0);
sampler::SamplerAddressMode::MirrorClampToEdge,
1.0,
1.0,
0.0,
2.0);

match r {
Err(sampler::SamplerCreationError::SamplerMirrorClampToEdgeExtensionNotEnabled) => (),
_ => panic!()
_ => panic!(),
}
}
}

@ -103,7 +103,7 @@ pub fn supported_present_modes_from_list<I>(elem: I) -> SupportedPresentModes
vk::PRESENT_MODE_MAILBOX_KHR => result.mailbox = true,
vk::PRESENT_MODE_FIFO_KHR => result.fifo = true,
vk::PRESENT_MODE_FIFO_RELAXED_KHR => result.relaxed = true,
_ => panic!("Wrong value for vk::PresentModeKHR")
_ => panic!("Wrong value for vk::PresentModeKHR"),
}
}
result
@ -148,10 +148,22 @@ impl Iterator for SupportedPresentModesIter {

#[inline]
fn next(&mut self) -> Option<PresentMode> {
if self.0.immediate { self.0.immediate = false; return Some(PresentMode::Immediate); }
if self.0.mailbox { self.0.mailbox = false; return Some(PresentMode::Mailbox); }
if self.0.fifo { self.0.fifo = false; return Some(PresentMode::Fifo); }
if self.0.relaxed { self.0.relaxed = false; return Some(PresentMode::Relaxed); }
if self.0.immediate {
self.0.immediate = false;
return Some(PresentMode::Immediate);
}
if self.0.mailbox {
self.0.mailbox = false;
return Some(PresentMode::Mailbox);
}
if self.0.fifo {
self.0.fifo = false;
return Some(PresentMode::Fifo);
}
if self.0.relaxed {
self.0.relaxed = false;
return Some(PresentMode::Relaxed);
}
None
}
}
@ -214,10 +226,18 @@ pub struct SupportedCompositeAlpha {

pub fn supported_composite_alpha_from_bits(val: u32) -> SupportedCompositeAlpha {
let mut result = SupportedCompositeAlpha::none();
if (val & vk::COMPOSITE_ALPHA_OPAQUE_BIT_KHR) != 0 { result.opaque = true; }
if (val & vk::COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR) != 0 { result.pre_multiplied = true; }
if (val & vk::COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR) != 0 { result.post_multiplied = true; }
if (val & vk::COMPOSITE_ALPHA_INHERIT_BIT_KHR) != 0 { result.inherit = true; }
if (val & vk::COMPOSITE_ALPHA_OPAQUE_BIT_KHR) != 0 {
result.opaque = true;
}
if (val & vk::COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR) != 0 {
result.pre_multiplied = true;
}
if (val & vk::COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR) != 0 {
result.post_multiplied = true;
}
if (val & vk::COMPOSITE_ALPHA_INHERIT_BIT_KHR) != 0 {
result.inherit = true;
}
result
}

@ -260,10 +280,22 @@ impl Iterator for SupportedCompositeAlphaIter {

#[inline]
fn next(&mut self) -> Option<CompositeAlpha> {
if self.0.opaque { self.0.opaque = false; return Some(CompositeAlpha::Opaque); }
if self.0.pre_multiplied { self.0.pre_multiplied = false; return Some(CompositeAlpha::PreMultiplied); }
if self.0.post_multiplied { self.0.post_multiplied = false; return Some(CompositeAlpha::PostMultiplied); }
if self.0.inherit { self.0.inherit = false; return Some(CompositeAlpha::Inherit); }
if self.0.opaque {
self.0.opaque = false;
return Some(CompositeAlpha::Opaque);
}
if self.0.pre_multiplied {
self.0.pre_multiplied = false;
return Some(CompositeAlpha::PreMultiplied);
}
if self.0.post_multiplied {
self.0.post_multiplied = false;
return Some(CompositeAlpha::PostMultiplied);
}
if self.0.inherit {
self.0.inherit = false;
return Some(CompositeAlpha::Inherit);
}
None
}
}
@ -282,7 +314,8 @@ pub struct SupportedSurfaceTransforms {
pub inherit: bool,
}

pub fn surface_transforms_from_bits(val: vk::SurfaceTransformFlagsKHR) -> SupportedSurfaceTransforms {
pub fn surface_transforms_from_bits(val: vk::SurfaceTransformFlagsKHR)
-> SupportedSurfaceTransforms {
macro_rules! v {
($val:expr, $out:ident, $e:expr, $f:ident) => (
if ($val & $e) != 0 { $out.$f = true; }
@ -290,16 +323,37 @@ pub fn surface_transforms_from_bits(val: vk::SurfaceTransformFlagsKHR) -> Suppor
}

let mut result = SupportedSurfaceTransforms::none();
v!(val, result, vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR, identity);
v!(val, result, vk::SURFACE_TRANSFORM_ROTATE_90_BIT_KHR, rotate90);
v!(val, result, vk::SURFACE_TRANSFORM_ROTATE_180_BIT_KHR, rotate180);
v!(val, result, vk::SURFACE_TRANSFORM_ROTATE_270_BIT_KHR, rotate270);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR, horizontal_mirror);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR,
v!(val,
result,
vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
identity);
v!(val,
result,
vk::SURFACE_TRANSFORM_ROTATE_90_BIT_KHR,
rotate90);
v!(val,
result,
vk::SURFACE_TRANSFORM_ROTATE_180_BIT_KHR,
rotate180);
v!(val,
result,
vk::SURFACE_TRANSFORM_ROTATE_270_BIT_KHR,
rotate270);
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR,
horizontal_mirror);
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR,
horizontal_mirror_rotate90);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR,
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR,
horizontal_mirror_rotate180);
v!(val, result, vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR,
v!(val,
result,
vk::SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR,
horizontal_mirror_rotate270);
v!(val, result, vk::SURFACE_TRANSFORM_INHERIT_BIT_KHR, inherit);
result
@ -354,15 +408,42 @@ impl Iterator for SupportedSurfaceTransformsIter {

#[inline]
fn next(&mut self) -> Option<SurfaceTransform> {
if self.0.identity { self.0.identity = false; return Some(SurfaceTransform::Identity); }
if self.0.rotate90 { self.0.rotate90 = false; return Some(SurfaceTransform::Rotate90); }
if self.0.rotate180 { self.0.rotate180 = false; return Some(SurfaceTransform::Rotate180); }
if self.0.rotate270 { self.0.rotate270 = false; return Some(SurfaceTransform::Rotate270); }
if self.0.horizontal_mirror { self.0.horizontal_mirror = false; return Some(SurfaceTransform::HorizontalMirror); }
if self.0.horizontal_mirror_rotate90 { self.0.horizontal_mirror_rotate90 = false; return Some(SurfaceTransform::HorizontalMirrorRotate90); }
if self.0.horizontal_mirror_rotate180 { self.0.horizontal_mirror_rotate180 = false; return Some(SurfaceTransform::HorizontalMirrorRotate180); }
if self.0.horizontal_mirror_rotate270 { self.0.horizontal_mirror_rotate270 = false; return Some(SurfaceTransform::HorizontalMirrorRotate270); }
if self.0.inherit { self.0.inherit = false; return Some(SurfaceTransform::Inherit); }
if self.0.identity {
self.0.identity = false;
return Some(SurfaceTransform::Identity);
}
if self.0.rotate90 {
self.0.rotate90 = false;
return Some(SurfaceTransform::Rotate90);
}
if self.0.rotate180 {
self.0.rotate180 = false;
return Some(SurfaceTransform::Rotate180);
}
if self.0.rotate270 {
self.0.rotate270 = false;
return Some(SurfaceTransform::Rotate270);
}
if self.0.horizontal_mirror {
self.0.horizontal_mirror = false;
return Some(SurfaceTransform::HorizontalMirror);
}
if self.0.horizontal_mirror_rotate90 {
self.0.horizontal_mirror_rotate90 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate90);
}
if self.0.horizontal_mirror_rotate180 {
self.0.horizontal_mirror_rotate180 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate180);
}
if self.0.horizontal_mirror_rotate270 {
self.0.horizontal_mirror_rotate270 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate270);
}
if self.0.inherit {
self.0.inherit = false;
return Some(SurfaceTransform::Inherit);
}
None
}
}
@ -497,6 +578,6 @@ pub fn color_space_from_num(val: u32) -> ColorSpace {
vk::COLOR_SPACE_ADOBERGB_LINEAR_EXT => ColorSpace::AdobeRgbLinear,
vk::COLOR_SPACE_ADOBERGB_NONLINEAR_EXT => ColorSpace::AdobeRgbNonLinear,
vk::COLOR_SPACE_PASS_THROUGH_EXT => ColorSpace::PassThrough,
_ => panic!("Wrong value for color space enum")
_ => panic!("Wrong value for color space enum"),
}
}

@ -35,12 +35,12 @@ use std::vec::IntoIter;
|
||||
|
||||
use instance::Instance;
|
||||
use instance::PhysicalDevice;
|
||||
use swapchain::capabilities;
|
||||
use swapchain::SupportedSurfaceTransforms;
|
||||
use swapchain::capabilities;
|
||||
|
||||
use check_errors;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
// TODO: extract this to a `display` module and solve the visibility problems
|
||||
@ -65,17 +65,18 @@ impl DisplayPlane {
|
||||
|
||||
let num = unsafe {
|
||||
let mut num: u32 = 0;
|
||||
try!(check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
|
||||
&mut num, ptr::null_mut())));
|
||||
check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
|
||||
&mut num,
|
||||
ptr::null_mut()))?;
|
||||
num
|
||||
};
|
||||
|
||||
let planes: Vec<vk::DisplayPlanePropertiesKHR> = unsafe {
|
||||
let mut planes = Vec::with_capacity(num as usize);
|
||||
let mut num = num;
|
||||
try!(check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
|
||||
check_errors(vk.GetPhysicalDeviceDisplayPlanePropertiesKHR(device.internal_object(),
|
||||
&mut num,
|
||||
planes.as_mut_ptr())));
|
||||
planes.as_mut_ptr()))?;
|
||||
planes.set_len(num as usize);
|
||||
planes
|
||||
};
|
||||
@ -140,7 +141,10 @@ impl DisplayPlane {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.supported_displays.iter().find(|&&d| d == display.internal_object()).is_some()
|
||||
self.supported_displays
|
||||
.iter()
|
||||
.find(|&&d| d == display.internal_object())
|
||||
.is_some()
|
||||
}
|
||||
}
|
||||
|
||||
@ -161,28 +165,33 @@ impl Display {
|
||||
|
||||
let num = unsafe {
|
||||
let mut num = 0;
|
||||
try!(check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
|
||||
&mut num, ptr::null_mut())));
|
||||
check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
|
||||
&mut num,
|
||||
ptr::null_mut()))?;
|
||||
num
|
||||
};
|
||||
|
||||
let displays: Vec<vk::DisplayPropertiesKHR> = unsafe {
|
||||
let mut displays = Vec::with_capacity(num as usize);
|
||||
let mut num = num;
|
||||
try!(check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
|
||||
check_errors(vk.GetPhysicalDeviceDisplayPropertiesKHR(device.internal_object(),
|
||||
&mut num,
|
||||
displays.as_mut_ptr())));
|
||||
displays.as_mut_ptr()))?;
|
||||
displays.set_len(num as usize);
|
||||
displays
|
||||
};
|
||||
|
||||
Ok(displays.into_iter().map(|prop| {
|
||||
Ok(displays
|
||||
.into_iter()
|
||||
.map(|prop| {
|
||||
Display {
|
||||
instance: device.instance().clone(),
|
||||
physical_device: device.index(),
|
||||
properties: Arc::new(prop),
|
||||
}
|
||||
}).collect::<Vec<_>>().into_iter())
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter())
|
||||
}
|
||||
|
||||
/// Enumerates all the displays that are available on a given physical device.
|
||||
@ -254,29 +263,35 @@ impl Display {
|
||||
|
||||
let num = unsafe {
|
||||
let mut num = 0;
|
||||
try!(check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
|
||||
check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
|
||||
self.properties.display,
|
||||
&mut num, ptr::null_mut())));
|
||||
&mut num,
|
||||
ptr::null_mut()))?;
|
||||
num
|
||||
};
|
||||
|
||||
let modes: Vec<vk::DisplayModePropertiesKHR> = unsafe {
|
||||
let mut modes = Vec::with_capacity(num as usize);
|
||||
let mut num = num;
|
||||
try!(check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
|
||||
self.properties.display, &mut num,
|
||||
modes.as_mut_ptr())));
|
||||
check_errors(vk.GetDisplayModePropertiesKHR(self.physical_device().internal_object(),
|
||||
self.properties.display,
|
||||
&mut num,
|
||||
modes.as_mut_ptr()))?;
|
||||
modes.set_len(num as usize);
|
||||
modes
|
||||
};
|
||||
|
||||
Ok(modes.into_iter().map(|mode| {
|
||||
Ok(modes
|
||||
.into_iter()
|
||||
.map(|mode| {
|
||||
DisplayMode {
|
||||
display: self.clone(),
|
||||
display_mode: mode.displayMode,
|
||||
parameters: mode.parameters,
|
||||
}
|
||||
}).collect::<Vec<_>>().into_iter())
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter())
|
||||
}
|
||||
|
||||
/// Returns a list of all modes available on this display.
|
||||
|
@ -194,24 +194,24 @@
|
||||
use std::sync::atomic::AtomicBool;
|
||||
|
||||
pub use self::capabilities::Capabilities;
|
||||
pub use self::capabilities::PresentMode;
|
||||
pub use self::capabilities::SupportedPresentModes;
|
||||
pub use self::capabilities::SupportedPresentModesIter;
|
||||
pub use self::capabilities::SurfaceTransform;
|
||||
pub use self::capabilities::ColorSpace;
|
||||
pub use self::capabilities::CompositeAlpha;
|
||||
pub use self::capabilities::PresentMode;
|
||||
pub use self::capabilities::SupportedCompositeAlpha;
|
||||
pub use self::capabilities::SupportedCompositeAlphaIter;
|
||||
pub use self::capabilities::ColorSpace;
|
||||
pub use self::capabilities::SupportedPresentModes;
|
||||
pub use self::capabilities::SupportedPresentModesIter;
|
||||
pub use self::capabilities::SupportedSurfaceTransforms;
|
||||
pub use self::capabilities::SupportedSurfaceTransformsIter;
|
||||
pub use self::capabilities::SurfaceTransform;
|
||||
pub use self::surface::CapabilitiesError;
|
||||
pub use self::surface::Surface;
|
||||
pub use self::surface::SurfaceCreationError;
|
||||
pub use self::surface::CapabilitiesError;
|
||||
pub use self::swapchain::AcquireError;
|
||||
pub use self::swapchain::PresentFuture;
|
||||
pub use self::swapchain::Swapchain;
|
||||
pub use self::swapchain::SwapchainCreationError;
|
||||
pub use self::swapchain::SwapchainAcquireFuture;
|
||||
pub use self::swapchain::SwapchainCreationError;
|
||||
pub use self::swapchain::acquire_next_image;
|
||||
pub use self::swapchain::present;
|
||||
|
||||
|
@ -20,16 +20,16 @@ use image::ImageUsage;
|
||||
use instance::Instance;
|
||||
use instance::PhysicalDevice;
|
||||
use instance::QueueFamily;
|
||||
use swapchain::capabilities;
|
||||
use swapchain::Capabilities;
|
||||
use swapchain::SurfaceSwapchainLock;
|
||||
use swapchain::capabilities;
|
||||
use swapchain::display::DisplayMode;
|
||||
use swapchain::display::DisplayPlane;
|
||||
|
||||
use check_errors;
|
||||
use Error;
|
||||
use OomError;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
/// Represents a surface on the screen.
|
||||
@ -53,9 +53,14 @@ impl Surface {
|
||||
/// - Panics if `plane` doesn't support the display of `display_mode`.
|
||||
///
|
||||
pub fn from_display_mode(display_mode: &DisplayMode, plane: &DisplayPlane)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
if !display_mode
|
||||
.display()
|
||||
.physical_device()
|
||||
.instance()
|
||||
.loaded_extensions()
|
||||
.khr_display
|
||||
{
|
||||
if !display_mode.display().physical_device().instance().loaded_extensions().khr_display {
|
||||
return Err(SurfaceCreationError::MissingExtension { name: "VK_KHR_display" });
|
||||
}
|
||||
|
||||
@ -77,15 +82,18 @@ impl Surface {
|
||||
transform: vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR, // TODO: let user choose
|
||||
globalAlpha: 0.0, // TODO: let user choose
|
||||
alphaMode: vk::DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR, // TODO: let user choose
|
||||
imageExtent: vk::Extent2D { // TODO: let user choose
|
||||
imageExtent: vk::Extent2D {
|
||||
// TODO: let user choose
|
||||
width: display_mode.visible_region()[0],
|
||||
height: display_mode.visible_region()[1],
|
||||
},
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateDisplayPlaneSurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateDisplayPlaneSurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -105,8 +113,7 @@ impl Surface {
|
||||
/// The caller must ensure that the `hinstance` and the `hwnd` are both correct and stay
|
||||
/// alive for the entire lifetime of the surface.
|
||||
pub unsafe fn from_hwnd<T, U>(instance: Arc<Instance>, hinstance: *const T, hwnd: *const U)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().khr_win32_surface {
|
||||
@ -123,8 +130,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateWin32SurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateWin32SurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -144,8 +153,7 @@ impl Surface {
|
||||
/// The caller must ensure that the `connection` and the `window` are both correct and stay
|
||||
/// alive for the entire lifetime of the surface.
|
||||
pub unsafe fn from_xcb<C>(instance: Arc<Instance>, connection: *const C, window: u32)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().khr_xcb_surface {
|
||||
@ -162,8 +170,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateXcbSurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateXcbSurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -183,8 +193,7 @@ impl Surface {
|
||||
/// The caller must ensure that the `display` and the `window` are both correct and stay
|
||||
/// alive for the entire lifetime of the surface.
|
||||
pub unsafe fn from_xlib<D>(instance: Arc<Instance>, display: *const D, window: c_ulong)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().khr_xlib_surface {
|
||||
@ -201,8 +210,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateXlibSurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateXlibSurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -221,9 +232,9 @@ impl Surface {
|
||||
///
|
||||
/// The caller must ensure that the `display` and the `surface` are both correct and stay
|
||||
/// alive for the entire lifetime of the surface.
|
||||
pub unsafe fn from_wayland<D, S>(instance: Arc<Instance>, display: *const D, surface: *const S)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
pub unsafe fn from_wayland<D, S>(instance: Arc<Instance>, display: *const D,
|
||||
surface: *const S)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().khr_wayland_surface {
|
||||
@ -240,8 +251,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateWaylandSurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateWaylandSurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -262,8 +275,7 @@ impl Surface {
|
||||
/// The caller must ensure that the `connection` and the `surface` are both correct and stay
|
||||
/// alive for the entire lifetime of the surface.
|
||||
pub unsafe fn from_mir<C, S>(instance: Arc<Instance>, connection: *const C, surface: *const S)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().khr_mir_surface {
|
||||
@ -280,8 +292,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateMirSurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateMirSurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -299,8 +313,7 @@ impl Surface {
|
||||
/// The caller must ensure that the `window` is correct and stays alive for the entire
|
||||
/// lifetime of the surface.
|
||||
pub unsafe fn from_anativewindow<T>(instance: Arc<Instance>, window: *const T)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().khr_android_surface {
|
||||
@ -316,8 +329,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateAndroidSurfaceKHR(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateAndroidSurfaceKHR(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -336,8 +351,7 @@ impl Surface {
|
||||
/// lifetime of the surface.
|
||||
/// - The `UIView` must be backed by a `CALayer` instance of type `CAMetalLayer`.
|
||||
pub unsafe fn from_ios_moltenvk<T>(instance: Arc<Instance>, view: *const T)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().mvk_ios_surface {
|
||||
@ -353,8 +367,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateIOSSurfaceMVK(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateIOSSurfaceMVK(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -373,8 +389,7 @@ impl Surface {
|
||||
/// lifetime of the surface.
|
||||
/// - The `NSView` must be backed by a `CALayer` instance of type `CAMetalLayer`.
|
||||
pub unsafe fn from_macos_moltenvk<T>(instance: Arc<Instance>, view: *const T)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().mvk_macos_surface {
|
||||
@ -390,8 +405,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateMacOSSurfaceMVK(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateMacOSSurfaceMVK(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -409,8 +426,7 @@ impl Surface {
|
||||
/// The caller must ensure that the `window` is correct and stays alive for the entire
|
||||
/// lifetime of the surface.
|
||||
pub unsafe fn from_vi_surface<T>(instance: Arc<Instance>, window: *const T)
|
||||
-> Result<Arc<Surface>, SurfaceCreationError>
|
||||
{
|
||||
-> Result<Arc<Surface>, SurfaceCreationError> {
|
||||
let vk = instance.pointers();
|
||||
|
||||
if !instance.loaded_extensions().nn_vi_surface {
|
||||
@ -426,8 +442,10 @@ impl Surface {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateViSurfaceNN(instance.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateViSurfaceNN(instance.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
@ -445,10 +463,12 @@ impl Surface {
|
||||
let vk = self.instance.pointers();
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(
|
||||
vk.GetPhysicalDeviceSurfaceSupportKHR(queue.physical_device().internal_object(),
|
||||
queue.id(), self.surface, &mut output)
|
||||
));
|
||||
check_errors(vk.GetPhysicalDeviceSurfaceSupportKHR(queue
|
||||
.physical_device()
|
||||
.internal_object(),
|
||||
queue.id(),
|
||||
self.surface,
|
||||
&mut output))?;
|
||||
Ok(output != 0)
|
||||
}
|
||||
}
|
||||
@ -467,54 +487,53 @@ impl Surface {
|
||||
///
|
||||
pub fn capabilities(&self, device: PhysicalDevice) -> Result<Capabilities, CapabilitiesError> {
|
||||
unsafe {
|
||||
assert_eq!(&*self.instance as *const _, &**device.instance() as *const _,
|
||||
assert_eq!(&*self.instance as *const _,
|
||||
&**device.instance() as *const _,
|
||||
"Instance mismatch in Surface::capabilities");
|
||||
|
||||
let vk = self.instance.pointers();
|
||||
|
||||
let caps = {
|
||||
let mut out: vk::SurfaceCapabilitiesKHR = mem::uninitialized();
|
||||
try!(check_errors(
|
||||
vk.GetPhysicalDeviceSurfaceCapabilitiesKHR(device.internal_object(),
|
||||
self.surface, &mut out)
|
||||
));
|
||||
check_errors(vk.GetPhysicalDeviceSurfaceCapabilitiesKHR(device.internal_object(),
|
||||
self.surface,
|
||||
&mut out))?;
|
||||
out
|
||||
};
|
||||
|
||||
let formats = {
|
||||
let mut num = 0;
|
||||
try!(check_errors(
|
||||
vk.GetPhysicalDeviceSurfaceFormatsKHR(device.internal_object(),
|
||||
self.surface, &mut num,
|
||||
ptr::null_mut())
|
||||
));
|
||||
check_errors(vk.GetPhysicalDeviceSurfaceFormatsKHR(device.internal_object(),
|
||||
self.surface,
|
||||
&mut num,
|
||||
ptr::null_mut()))?;
|
||||
|
||||
let mut formats = Vec::with_capacity(num as usize);
|
||||
try!(check_errors(
|
||||
vk.GetPhysicalDeviceSurfaceFormatsKHR(device.internal_object(),
|
||||
self.surface, &mut num,
|
||||
formats.as_mut_ptr())
|
||||
));
|
||||
check_errors(vk.GetPhysicalDeviceSurfaceFormatsKHR(device.internal_object(),
|
||||
self.surface,
|
||||
&mut num,
|
||||
formats.as_mut_ptr()))?;
|
||||
formats.set_len(num as usize);
|
||||
formats
|
||||
};
|
||||
|
||||
let modes = {
|
||||
let mut num = 0;
|
||||
try!(check_errors(
|
||||
vk.GetPhysicalDeviceSurfacePresentModesKHR(device.internal_object(),
|
||||
self.surface, &mut num,
|
||||
ptr::null_mut())
|
||||
));
|
||||
check_errors(vk.GetPhysicalDeviceSurfacePresentModesKHR(device.internal_object(),
|
||||
self.surface,
|
||||
&mut num,
|
||||
ptr::null_mut()))?;
|
||||
|
||||
let mut modes = Vec::with_capacity(num as usize);
|
||||
try!(check_errors(
|
||||
vk.GetPhysicalDeviceSurfacePresentModesKHR(device.internal_object(),
|
||||
self.surface, &mut num,
|
||||
modes.as_mut_ptr())
|
||||
));
|
||||
check_errors(vk.GetPhysicalDeviceSurfacePresentModesKHR(device.internal_object(),
|
||||
self.surface,
|
||||
&mut num,
|
||||
modes.as_mut_ptr()))?;
|
||||
modes.set_len(num as usize);
|
||||
debug_assert!(modes.iter().find(|&&m| m == vk::PRESENT_MODE_FIFO_KHR).is_some());
|
||||
debug_assert!(modes
|
||||
.iter()
|
||||
.find(|&&m| m == vk::PRESENT_MODE_FIFO_KHR)
|
||||
.is_some());
|
||||
debug_assert!(modes.iter().count() > 0);
|
||||
capabilities::supported_present_modes_from_list(modes.into_iter())
|
||||
};
|
||||
@ -598,7 +617,7 @@ pub enum SurfaceCreationError {
|
||||
/// The extension required for this function was not enabled.
|
||||
MissingExtension {
|
||||
/// Name of the missing extension.
|
||||
name: &'static str
|
||||
name: &'static str,
|
||||
},
|
||||
}
|
||||
|
||||
@ -607,8 +626,8 @@ impl error::Error for SurfaceCreationError {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
SurfaceCreationError::OomError(_) => "not enough memory available",
|
||||
SurfaceCreationError::MissingExtension { .. } => "the extension required for this \
|
||||
function was not enabled",
|
||||
SurfaceCreationError::MissingExtension { .. } =>
|
||||
"the extension required for this function was not enabled",
|
||||
}
|
||||
}
|
||||
|
||||
@ -616,7 +635,7 @@ impl error::Error for SurfaceCreationError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
SurfaceCreationError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -641,7 +660,7 @@ impl From<Error> for SurfaceCreationError {
|
||||
match err {
|
||||
err @ Error::OutOfHostMemory => SurfaceCreationError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => SurfaceCreationError::OomError(OomError::from(err)),
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -670,7 +689,7 @@ impl error::Error for CapabilitiesError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
CapabilitiesError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -696,7 +715,7 @@ impl From<Error> for CapabilitiesError {
|
||||
err @ Error::OutOfHostMemory => CapabilitiesError::OomError(OomError::from(err)),
|
||||
err @ Error::OutOfDeviceMemory => CapabilitiesError::OomError(OomError::from(err)),
|
||||
Error::SurfaceLost => CapabilitiesError::SurfaceLost,
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -712,7 +731,7 @@ mod tests {
|
||||
let instance = instance!();
|
||||
match unsafe { Surface::from_hwnd(instance, ptr::null::<u8>(), ptr::null::<u8>()) } {
|
||||
Err(SurfaceCreationError::MissingExtension { .. }) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -721,7 +740,7 @@ mod tests {
|
||||
let instance = instance!();
|
||||
match unsafe { Surface::from_xcb(instance, ptr::null::<u8>(), 0) } {
|
||||
Err(SurfaceCreationError::MissingExtension { .. }) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -730,7 +749,7 @@ mod tests {
|
||||
let instance = instance!();
|
||||
match unsafe { Surface::from_xlib(instance, ptr::null::<u8>(), 0) } {
|
||||
Err(SurfaceCreationError::MissingExtension { .. }) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -739,7 +758,7 @@ mod tests {
|
||||
let instance = instance!();
|
||||
match unsafe { Surface::from_wayland(instance, ptr::null::<u8>(), ptr::null::<u8>()) } {
|
||||
Err(SurfaceCreationError::MissingExtension { .. }) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -748,7 +767,7 @@ mod tests {
|
||||
let instance = instance!();
|
||||
match unsafe { Surface::from_mir(instance, ptr::null::<u8>(), ptr::null::<u8>()) } {
|
||||
Err(SurfaceCreationError::MissingExtension { .. }) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -757,7 +776,7 @@ mod tests {
|
||||
let instance = instance!();
|
||||
match unsafe { Surface::from_anativewindow(instance, ptr::null::<u8>()) } {
|
||||
Err(SurfaceCreationError::MissingExtension { .. }) => (),
|
||||
_ => panic!()
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -30,15 +30,15 @@ use image::ImageAccess;
|
||||
use image::ImageDimensions;
|
||||
use image::ImageLayout;
|
||||
use image::ImageUsage;
|
||||
use image::sys::UnsafeImage;
|
||||
use image::swapchain::SwapchainImage;
|
||||
use image::sys::UnsafeImage;
|
||||
use swapchain::CapabilitiesError;
|
||||
use swapchain::ColorSpace;
|
||||
use swapchain::CompositeAlpha;
|
||||
use swapchain::PresentMode;
|
||||
use swapchain::Surface;
|
||||
use swapchain::SurfaceTransform;
|
||||
use swapchain::SurfaceSwapchainLock;
|
||||
use swapchain::SurfaceTransform;
|
||||
use sync::AccessCheckError;
|
||||
use sync::AccessError;
|
||||
use sync::AccessFlagBits;
|
||||
@ -48,11 +48,11 @@ use sync::PipelineStages;
|
||||
use sync::Semaphore;
|
||||
use sync::SharingMode;
|
||||
|
||||
use check_errors;
|
||||
use Error;
|
||||
use OomError;
|
||||
use Success;
|
||||
use VulkanObject;
|
||||
use check_errors;
|
||||
use vk;
|
||||
|
||||
/// Tries to take ownership of an image in order to draw on it.
|
||||
@ -66,8 +66,7 @@ use vk;
|
||||
// TODO: has to make sure vkQueuePresent is called, because calling acquire_next_image many
|
||||
// times in a row is an error
|
||||
pub fn acquire_next_image(swapchain: Arc<Swapchain>, timeout: Option<Duration>)
|
||||
-> Result<(usize, SwapchainAcquireFuture), AcquireError>
|
||||
{
|
||||
-> Result<(usize, SwapchainAcquireFuture), AcquireError> {
|
||||
unsafe {
|
||||
// Check that this is not an old swapchain. From specs:
|
||||
// > swapchain must not have been replaced by being passed as the
|
||||
@ -79,30 +78,35 @@ pub fn acquire_next_image(swapchain: Arc<Swapchain>, timeout: Option<Duration>)
|
||||
|
||||
let vk = swapchain.device.pointers();
|
||||
|
||||
let semaphore = try!(Semaphore::new(swapchain.device.clone()));
|
||||
let semaphore = Semaphore::new(swapchain.device.clone())?;
|
||||
|
||||
let timeout_ns = if let Some(timeout) = timeout {
|
||||
timeout.as_secs().saturating_mul(1_000_000_000)
|
||||
timeout
|
||||
.as_secs()
|
||||
.saturating_mul(1_000_000_000)
|
||||
.saturating_add(timeout.subsec_nanos() as u64)
|
||||
} else {
|
||||
u64::max_value()
|
||||
};
|
||||
|
||||
let mut out = mem::uninitialized();
|
||||
let r = try!(check_errors(vk.AcquireNextImageKHR(swapchain.device.internal_object(),
|
||||
swapchain.swapchain, timeout_ns,
|
||||
semaphore.internal_object(), 0,
|
||||
&mut out)));
|
||||
let r = check_errors(vk.AcquireNextImageKHR(swapchain.device.internal_object(),
|
||||
swapchain.swapchain,
|
||||
timeout_ns,
|
||||
semaphore.internal_object(),
|
||||
0,
|
||||
&mut out))?;
|
||||
|
||||
let id = match r {
|
||||
Success::Success => out as usize,
|
||||
Success::Suboptimal => out as usize, // TODO: give that info to the user
|
||||
Success::NotReady => return Err(AcquireError::Timeout),
|
||||
Success::Timeout => return Err(AcquireError::Timeout),
|
||||
s => panic!("unexpected success value: {:?}", s)
|
||||
s => panic!("unexpected success value: {:?}", s),
|
||||
};
|
||||
|
||||
Ok((id, SwapchainAcquireFuture {
|
||||
Ok((id,
|
||||
SwapchainAcquireFuture {
|
||||
swapchain: swapchain.clone(), // TODO: don't clone
|
||||
semaphore: semaphore,
|
||||
image_id: id,
|
||||
@ -205,35 +209,56 @@ impl Swapchain {
|
||||
// TODO: add `ColorSpace` parameter
|
||||
// TODO: isn't it unsafe to take the surface through an Arc when it comes to vulkano-win?
|
||||
#[inline]
|
||||
pub fn new<F, S>(device: Arc<Device>, surface: Arc<Surface>, num_images: u32, format: F,
|
||||
pub fn new<F, S>(
|
||||
device: Arc<Device>, surface: Arc<Surface>, num_images: u32, format: F,
|
||||
dimensions: [u32; 2], layers: u32, usage: ImageUsage, sharing: S,
|
||||
transform: SurfaceTransform, alpha: CompositeAlpha, mode: PresentMode,
|
||||
clipped: bool, old_swapchain: Option<&Arc<Swapchain>>)
|
||||
transform: SurfaceTransform, alpha: CompositeAlpha, mode: PresentMode, clipped: bool,
|
||||
old_swapchain: Option<&Arc<Swapchain>>)
|
||||
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), SwapchainCreationError>
|
||||
where F: FormatDesc, S: Into<SharingMode>
|
||||
where F: FormatDesc,
|
||||
S: Into<SharingMode>
|
||||
{
|
||||
Swapchain::new_inner(device, surface, num_images, format.format(),
|
||||
ColorSpace::SrgbNonLinear, dimensions, layers, usage, sharing.into(),
|
||||
transform, alpha, mode, clipped, old_swapchain.map(|s| &**s))
|
||||
Swapchain::new_inner(device,
|
||||
surface,
|
||||
num_images,
|
||||
format.format(),
|
||||
ColorSpace::SrgbNonLinear,
|
||||
dimensions,
|
||||
layers,
|
||||
usage,
|
||||
sharing.into(),
|
||||
transform,
|
||||
alpha,
|
||||
mode,
|
||||
clipped,
|
||||
old_swapchain.map(|s| &**s))
|
||||
}
|
||||
|
||||
/// Recreates the swapchain with new dimensions.
|
||||
pub fn recreate_with_dimension(&self, dimensions: [u32; 2])
|
||||
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>),
|
||||
SwapchainCreationError>
|
||||
{
|
||||
Swapchain::new_inner(self.device.clone(), self.surface.clone(), self.num_images,
|
||||
self.format, self.color_space, dimensions, self.layers, self.usage,
|
||||
self.sharing.clone(), self.transform, self.alpha, self.mode,
|
||||
self.clipped, Some(self))
|
||||
pub fn recreate_with_dimension(
|
||||
&self, dimensions: [u32; 2])
|
||||
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), SwapchainCreationError> {
|
||||
Swapchain::new_inner(self.device.clone(),
|
||||
self.surface.clone(),
|
||||
self.num_images,
|
||||
self.format,
|
||||
self.color_space,
|
||||
dimensions,
|
||||
self.layers,
|
||||
self.usage,
|
||||
self.sharing.clone(),
|
||||
self.transform,
|
||||
self.alpha,
|
||||
self.mode,
|
||||
self.clipped,
|
||||
Some(self))
|
||||
}
|
||||
|
||||
fn new_inner(device: Arc<Device>, surface: Arc<Surface>, num_images: u32, format: Format,
|
||||
color_space: ColorSpace, dimensions: [u32; 2], layers: u32, usage: ImageUsage,
|
||||
sharing: SharingMode, transform: SurfaceTransform, alpha: CompositeAlpha,
|
||||
mode: PresentMode, clipped: bool, old_swapchain: Option<&Swapchain>)
|
||||
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), SwapchainCreationError>
|
||||
{
|
||||
-> Result<(Arc<Swapchain>, Vec<Arc<SwapchainImage>>), SwapchainCreationError> {
|
||||
assert_eq!(device.instance().internal_object(),
|
||||
surface.instance().internal_object());
|
||||
|
||||
@ -247,7 +272,11 @@ impl Swapchain {
|
||||
return Err(SwapchainCreationError::UnsupportedMaxImagesCount);
|
||||
}
|
||||
}
|
||||
if !capabilities.supported_formats.iter().any(|&(f, c)| f == format && c == color_space) {
|
||||
if !capabilities
|
||||
.supported_formats
|
||||
.iter()
|
||||
.any(|&(f, c)| f == format && c == color_space)
|
||||
{
|
||||
return Err(SwapchainCreationError::UnsupportedFormat);
|
||||
}
|
||||
if dimensions[0] < capabilities.min_image_extent[0] {
|
||||
@ -265,7 +294,9 @@ impl Swapchain {
|
||||
if layers < 1 && layers > capabilities.max_image_array_layers {
|
||||
return Err(SwapchainCreationError::UnsupportedArrayLayers);
|
||||
}
|
||||
if (usage.to_usage_bits() & capabilities.supported_usage_flags.to_usage_bits()) != usage.to_usage_bits() {
|
||||
if (usage.to_usage_bits() & capabilities.supported_usage_flags.to_usage_bits()) !=
|
||||
usage.to_usage_bits()
|
||||
{
|
||||
return Err(SwapchainCreationError::UnsupportedUsageFlags);
|
||||
}
|
||||
if !capabilities.supported_transforms.supports(transform) {
|
||||
@ -288,7 +319,9 @@ impl Swapchain {
|
||||
// Checking that the surface doesn't already have a swapchain.
|
||||
if old_swapchain.is_none() {
|
||||
let has_already = surface.flag().swap(true, Ordering::AcqRel);
|
||||
if has_already { return Err(SwapchainCreationError::SurfaceInUse); }
|
||||
if has_already {
|
||||
return Err(SwapchainCreationError::SurfaceInUse);
|
||||
}
|
||||
}
|
||||
|
||||
if !device.loaded_extensions().khr_swapchain {
|
||||
@ -307,7 +340,8 @@ impl Swapchain {
|
||||
let swapchain = unsafe {
|
||||
let (sh_mode, sh_count, sh_indices) = match sharing {
|
||||
SharingMode::Exclusive(_) => (vk::SHARING_MODE_EXCLUSIVE, 0, ptr::null()),
|
||||
SharingMode::Concurrent(ref ids) => (vk::SHARING_MODE_CONCURRENT, ids.len() as u32,
|
||||
SharingMode::Concurrent(ref ids) => (vk::SHARING_MODE_CONCURRENT,
|
||||
ids.len() as u32,
|
||||
ids.as_ptr()),
|
||||
};
|
||||
|
||||
@ -319,7 +353,10 @@ impl Swapchain {
|
||||
minImageCount: num_images,
|
||||
imageFormat: format as u32,
|
||||
imageColorSpace: color_space as u32,
|
||||
imageExtent: vk::Extent2D { width: dimensions[0], height: dimensions[1] },
|
||||
imageExtent: vk::Extent2D {
|
||||
width: dimensions[0],
|
||||
height: dimensions[1],
|
||||
},
|
||||
imageArrayLayers: layers,
|
||||
imageUsage: usage.to_usage_bits(),
|
||||
imageSharingMode: sh_mode,
|
||||
@ -337,26 +374,33 @@ impl Swapchain {
|
||||
};
|
||||
|
||||
let mut output = mem::uninitialized();
|
||||
try!(check_errors(vk.CreateSwapchainKHR(device.internal_object(), &infos,
|
||||
ptr::null(), &mut output)));
|
||||
check_errors(vk.CreateSwapchainKHR(device.internal_object(),
|
||||
&infos,
|
||||
ptr::null(),
|
||||
&mut output))?;
|
||||
output
|
||||
};
|
||||
|
||||
let image_handles = unsafe {
|
||||
let mut num = 0;
|
||||
try!(check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
|
||||
swapchain, &mut num,
|
||||
ptr::null_mut())));
|
||||
check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
|
||||
swapchain,
|
||||
&mut num,
|
||||
ptr::null_mut()))?;
|
||||
|
||||
let mut images = Vec::with_capacity(num as usize);
|
||||
try!(check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
|
||||
swapchain, &mut num,
|
||||
images.as_mut_ptr())));
|
||||
check_errors(vk.GetSwapchainImagesKHR(device.internal_object(),
|
||||
swapchain,
|
||||
&mut num,
|
||||
images.as_mut_ptr()))?;
|
||||
images.set_len(num as usize);
|
||||
images
|
||||
};
|
||||
|
||||
let images = image_handles.into_iter().enumerate().map(|(id, image)| unsafe {
|
||||
let images = image_handles
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.map(|(id, image)| unsafe {
|
||||
let dims = ImageDimensions::Dim2d {
|
||||
width: dimensions[0],
|
||||
height: dimensions[1],
|
||||
@ -364,14 +408,20 @@ impl Swapchain {
|
||||
cubemap_compatible: false,
|
||||
};
|
||||
|
||||
let img = UnsafeImage::from_raw(device.clone(), image, usage.to_usage_bits(), format,
|
||||
dims, 1, 1);
|
||||
let img = UnsafeImage::from_raw(device.clone(),
|
||||
image,
|
||||
usage.to_usage_bits(),
|
||||
format,
|
||||
dims,
|
||||
1,
|
||||
1);
|
||||
|
||||
ImageEntry {
|
||||
image: img,
|
||||
undefined_layout: AtomicBool::new(true)
|
||||
undefined_layout: AtomicBool::new(true),
|
||||
}
|
||||
}).collect::<Vec<_>>();
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let swapchain = Arc::new(Swapchain {
|
||||
device: device.clone(),
|
||||
@ -597,7 +647,7 @@ impl error::Error for SwapchainCreationError {
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
match *self {
|
||||
SwapchainCreationError::OomError(ref err) => Some(err),
|
||||
_ => None
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -628,7 +678,7 @@ impl From<Error> for SwapchainCreationError {
|
||||
err @ Error::NativeWindowInUse => {
|
||||
SwapchainCreationError::NativeWindowInUse
|
||||
},
|
||||
_ => panic!("unexpected error: {:?}", err)
|
||||
_ => panic!("unexpected error: {:?}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -706,26 +756,27 @@ unsafe impl GpuFuture for SwapchainAcquireFuture {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
fn check_buffer_access(
|
||||
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
Err(AccessCheckError::Unknown)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
|
||||
{
|
||||
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
|
||||
queue: &Queue)
|
||||
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
|
||||
let swapchain_image = self.swapchain.raw_image(self.image_id).unwrap();
|
||||
if swapchain_image.internal_object() != image.inner().internal_object() {
|
||||
return Err(AccessCheckError::Unknown);
|
||||
}
|
||||
|
||||
if self.swapchain.images[self.image_id].undefined_layout.load(Ordering::Relaxed) &&
|
||||
layout != ImageLayout::Undefined
|
||||
if self.swapchain.images[self.image_id]
|
||||
.undefined_layout
|
||||
.load(Ordering::Relaxed) && layout != ImageLayout::Undefined
{
return Err(AccessCheckError::Denied(AccessError::ImageNotInitialized {
requested: layout
requested: layout,
}));
}

@ -799,7 +850,7 @@ impl error::Error for AcquireError {
fn cause(&self) -> Option<&error::Error> {
match *self {
AcquireError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -827,14 +878,16 @@ impl From<Error> for AcquireError {
Error::DeviceLost => AcquireError::DeviceLost,
Error::SurfaceLost => AcquireError::SurfaceLost,
Error::OutOfDate => AcquireError::OutOfDate,
_ => panic!("unexpected error: {:?}", err)
_ => panic!("unexpected error: {:?}", err),
}
}
}

/// Represents a swapchain image being presented on the screen.
#[must_use = "Dropping this object will immediately block the thread until the GPU has finished processing the submission"]
pub struct PresentFuture<P> where P: GpuFuture {
pub struct PresentFuture<P>
where P: GpuFuture
{
previous: P,
queue: Arc<Queue>,
swapchain: Arc<Swapchain>,
@ -847,7 +900,9 @@ pub struct PresentFuture<P> where P: GpuFuture {
finished: AtomicBool,
}

impl<P> PresentFuture<P> where P: GpuFuture {
impl<P> PresentFuture<P>
where P: GpuFuture
{
/// Returns the index of the image in the list of images returned when creating the swapchain.
#[inline]
pub fn image_id(&self) -> usize {
@ -861,7 +916,9 @@ impl<P> PresentFuture<P> where P: GpuFuture {
}
}

unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
unsafe impl<P> GpuFuture for PresentFuture<P>
where P: GpuFuture
{
#[inline]
fn cleanup_finished(&mut self) {
self.previous.cleanup_finished();
@ -878,7 +935,7 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
// TODO: if the swapchain image layout is not PRESENT, should add a transition command
// buffer

Ok(match try!(self.previous.build_submission()) {
Ok(match self.previous.build_submission()? {
SubmitAnyBuilder::Empty => {
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id as u32);
@ -890,13 +947,13 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::CommandBuffer(cb) => {
try!(cb.submit(&queue.unwrap())); // FIXME: wrong because build_submission can be called multiple times
cb.submit(&queue.unwrap())?; // FIXME: wrong because build_submission can be called multiple times
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id as u32);
SubmitAnyBuilder::QueuePresent(builder)
},
SubmitAnyBuilder::BindSparse(cb) => {
try!(cb.submit(&queue.unwrap())); // FIXME: wrong because build_submission can be called multiple times
cb.submit(&queue.unwrap())?; // FIXME: wrong because build_submission can be called multiple times
let mut builder = SubmitPresentBuilder::new();
builder.add_swapchain(&self.swapchain, self.image_id as u32);
SubmitAnyBuilder::QueuePresent(builder)
@ -917,11 +974,11 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
// If `flushed` already contains `true`, then `build_submission` will return `Empty`.

match self.build_submission()? {
SubmitAnyBuilder::Empty => {}
SubmitAnyBuilder::Empty => {},
SubmitAnyBuilder::QueuePresent(present) => {
present.submit(&self.queue)?;
}
_ => unreachable!()
},
_ => unreachable!(),
}

self.flushed.store(true, Ordering::SeqCst);
@ -945,23 +1002,23 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
fn queue(&self) -> Option<Arc<Queue>> {
debug_assert!(match self.previous.queue() {
None => true,
Some(q) => q.is_same(&self.queue)
Some(q) => q.is_same(&self.queue),
});

Some(self.queue.clone())
}

#[inline]
fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
fn check_buffer_access(
&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
self.previous.check_buffer_access(buffer, exclusive, queue)
}

#[inline]
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
{
fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool,
queue: &Queue)
-> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError> {
let swapchain_image = self.swapchain.raw_image(self.image_id).unwrap();
if swapchain_image.internal_object() == image.inner().internal_object() {
// This future presents the swapchain image, which "unlocks" it. Therefore any attempt
@ -970,19 +1027,24 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
// a later swapchain acquire future. Hence why we return `Unknown` here.
Err(AccessCheckError::Unknown)
} else {
self.previous.check_image_access(image, layout, exclusive, queue)
self.previous
.check_image_access(image, layout, exclusive, queue)
}
}
}

unsafe impl<P> DeviceOwned for PresentFuture<P> where P: GpuFuture {
unsafe impl<P> DeviceOwned for PresentFuture<P>
where P: GpuFuture
{
#[inline]
fn device(&self) -> &Arc<Device> {
self.queue.device()
}
}

impl<P> Drop for PresentFuture<P> where P: GpuFuture {
impl<P> Drop for PresentFuture<P>
where P: GpuFuture
{
fn drop(&mut self) {
unsafe {
if !*self.finished.get_mut() {
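The hunks above apply the same two mechanical rewrites over and over: `where` clauses move onto their own line under the generic parameters, and the last match arm gains a trailing comma. As a minimal illustration of the resulting layout, here is a hypothetical `Wrapper` type (not vulkano code) written in the reformatted style:

use std::fmt::Debug;

// Hypothetical type, only here to show the `where`-on-its-own-line layout.
pub struct Wrapper<T>
    where T: Debug
{
    inner: T,
}

impl<T> Wrapper<T>
    where T: Debug
{
    fn print(&self) {
        println!("{:?}", self.inner);
    }
}

fn main() {
    Wrapper { inner: 42 }.print();
    // Trailing comma on the final arm, as in the reformatted match blocks above.
    match Some(1) {
        Some(_) => println!("some"),
        None => println!("none"),
    }
}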
@ -11,12 +11,12 @@ use std::mem;
use std::ptr;
use std::sync::Arc;

use device::Device;
use device::DeviceOwned;
use OomError;
use Success;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use vk;

/// Used to block the GPU execution until an event on the CPU occurs.
@ -47,8 +47,10 @@ impl Event {

let mut output = mem::uninitialized();
let vk = device.pointers();
try!(check_errors(vk.CreateEvent(device.internal_object(), &INFOS,
ptr::null(), &mut output)));
check_errors(vk.CreateEvent(device.internal_object(),
&INFOS,
ptr::null(),
&mut output))?;
output
};

@ -74,12 +76,12 @@ impl Event {
pub fn signaled(&self) -> Result<bool, OomError> {
unsafe {
let vk = self.device.pointers();
let result = try!(check_errors(vk.GetEventStatus(self.device.internal_object(),
self.event)));
let result = check_errors(vk.GetEventStatus(self.device.internal_object(),
self.event))?;
match result {
Success::EventSet => Ok(true),
Success::EventReset => Ok(false),
_ => unreachable!()
_ => unreachable!(),
}
}
}
@ -89,7 +91,7 @@ impl Event {
pub fn set_raw(&mut self) -> Result<(), OomError> {
unsafe {
let vk = self.device.pointers();
try!(check_errors(vk.SetEvent(self.device.internal_object(), self.event)));
check_errors(vk.SetEvent(self.device.internal_object(), self.event))?;
Ok(())
}
}
@ -112,7 +114,7 @@ impl Event {
pub fn reset_raw(&mut self) -> Result<(), OomError> {
unsafe {
let vk = self.device.pointers();
try!(check_errors(vk.ResetEvent(self.device.internal_object(), self.event)));
check_errors(vk.ResetEvent(self.device.internal_object(), self.event))?;
Ok(())
}
}
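The other rewrite that recurs throughout these hunks is the `use_try_shorthand` rule: every `try!(expr)` becomes `expr?`. Both forms return early with the error (converted via `From`) or yield the unwrapped value. A small, self-contained sketch of the `?` form, using plain std rather than vulkano code:

use std::num::ParseIntError;

// Hypothetical helper, only to demonstrate the `try!` -> `?` equivalence.
fn double(s: &str) -> Result<i32, ParseIntError> {
    // Before the rewrite this line would have read: let n = try!(s.parse::<i32>());
    let n = s.parse::<i32>()?;
    Ok(n * 2)
}

fn main() {
    assert_eq!(double("21"), Ok(42));
    assert!(double("not a number").is_err());
}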
@ -7,6 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.

use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::mem;
@ -15,16 +16,15 @@ use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::time::Duration;
use smallvec::SmallVec;

use device::Device;
use device::DeviceOwned;
use Error;
use OomError;
use SafeDeref;
use Success;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use vk;

/// A fence is used to know when a command buffer submission has finished its execution.
@ -33,7 +33,9 @@ use vk;
/// the same ressource simultaneously (except for concurrent reads). Therefore in order to know
/// when the CPU can access a ressource again, a fence has to be used.
#[derive(Debug)]
pub struct Fence<D = Arc<Device>> where D: SafeDeref<Target = Device> {
pub struct Fence<D = Arc<Device>>
where D: SafeDeref<Target = Device>
{
fence: vk::Fence,

device: D,
@ -44,7 +46,9 @@ pub struct Fence<D = Arc<Device>> where D: SafeDeref<Target = Device> {
signaled: AtomicBool,
}

impl<D> Fence<D> where D: SafeDeref<Target = Device> {
impl<D> Fence<D>
where D: SafeDeref<Target = Device>
{
/// Builds a new fence.
#[inline]
pub fn new(device: D) -> Result<Fence<D>, OomError> {
@ -62,13 +66,19 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
let infos = vk::FenceCreateInfo {
sType: vk::STRUCTURE_TYPE_FENCE_CREATE_INFO,
pNext: ptr::null(),
flags: if signaled { vk::FENCE_CREATE_SIGNALED_BIT } else { 0 },
flags: if signaled {
vk::FENCE_CREATE_SIGNALED_BIT
} else {
0
},
};

let vk = device.pointers();
let mut output = mem::uninitialized();
try!(check_errors(vk.CreateFence(device.internal_object(), &infos,
ptr::null(), &mut output)));
check_errors(vk.CreateFence(device.internal_object(),
&infos,
ptr::null(),
&mut output))?;
output
};

@ -83,18 +93,20 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
#[inline]
pub fn ready(&self) -> Result<bool, OomError> {
unsafe {
if self.signaled.load(Ordering::Relaxed) { return Ok(true); }
if self.signaled.load(Ordering::Relaxed) {
return Ok(true);
}

let vk = self.device.pointers();
let result = try!(check_errors(vk.GetFenceStatus(self.device.internal_object(),
self.fence)));
let result = check_errors(vk.GetFenceStatus(self.device.internal_object(),
self.fence))?;
match result {
Success::Success => {
self.signaled.store(true, Ordering::Relaxed);
Ok(true)
},
Success::NotReady => Ok(false),
_ => unreachable!()
_ => unreachable!(),
}
}
}
@ -106,18 +118,25 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
/// If you pass a duration of 0, then the function will return without blocking.
pub fn wait(&self, timeout: Option<Duration>) -> Result<(), FenceWaitError> {
unsafe {
if self.signaled.load(Ordering::Relaxed) { return Ok(()); }
if self.signaled.load(Ordering::Relaxed) {
return Ok(());
}

let timeout_ns = if let Some(timeout) = timeout {
timeout.as_secs().saturating_mul(1_000_000_000)
timeout
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(timeout.subsec_nanos() as u64)
} else {
u64::max_value()
};

let vk = self.device.pointers();
let r = try!(check_errors(vk.WaitForFences(self.device.internal_object(), 1,
&self.fence, vk::TRUE, timeout_ns)));
let r = check_errors(vk.WaitForFences(self.device.internal_object(),
1,
&self.fence,
vk::TRUE,
timeout_ns))?;

match r {
Success::Success => {
@ -127,7 +146,7 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
Success::Timeout => {
Err(FenceWaitError::Timeout)
},
_ => unreachable!()
_ => unreachable!(),
}
}
}
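The `Fence::wait` hunk above only reflows the timeout conversion; the behaviour is unchanged. As a standalone sketch (plain std, no vulkano types), the conversion maps an optional `Duration` to a saturating nanosecond count, with `None` meaning wait forever:

use std::time::Duration;

// Stand-alone version of the conversion used by `Fence::wait`.
fn timeout_ns(timeout: Option<Duration>) -> u64 {
    if let Some(timeout) = timeout {
        timeout
            .as_secs()
            .saturating_mul(1_000_000_000)
            .saturating_add(timeout.subsec_nanos() as u64)
    } else {
        u64::max_value()
    }
}

fn main() {
    assert_eq!(timeout_ns(Some(Duration::new(1, 500))), 1_000_000_500);
    // Saturates instead of overflowing for very large durations.
    assert_eq!(timeout_ns(Some(Duration::new(u64::max_value(), 0))), u64::max_value());
    assert_eq!(timeout_ns(None), u64::max_value());
}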
@ -138,14 +157,17 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
///
/// Panics if not all fences belong to the same device.
pub fn multi_wait<'a, I>(iter: I, timeout: Option<Duration>) -> Result<(), FenceWaitError>
where I: IntoIterator<Item = &'a Fence<D>>, D: 'a
where I: IntoIterator<Item = &'a Fence<D>>,
D: 'a
{
let mut device: Option<&Device> = None;

let fences: SmallVec<[vk::Fence; 8]> = iter.into_iter().filter_map(|fence| {
let fences: SmallVec<[vk::Fence; 8]> = iter.into_iter()
.filter_map(|fence| {
match &mut device {
dev @ &mut None => *dev = Some(&*fence.device),
&mut Some(ref dev) if &**dev as *const Device == &*fence.device as *const Device => {},
&mut Some(ref dev)
if &**dev as *const Device == &*fence.device as *const Device => {},
_ => panic!("Tried to wait for multiple fences that didn't belong to the \
same device"),
};
@ -155,10 +177,13 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
} else {
Some(fence.fence)
}
}).collect();
})
.collect();

let timeout_ns = if let Some(timeout) = timeout {
timeout.as_secs().saturating_mul(1_000_000_000)
timeout
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(timeout.subsec_nanos() as u64)
} else {
u64::max_value()
@ -167,8 +192,11 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
let r = if let Some(device) = device {
unsafe {
let vk = device.pointers();
try!(check_errors(vk.WaitForFences(device.internal_object(), fences.len() as u32,
fences.as_ptr(), vk::TRUE, timeout_ns)))
check_errors(vk.WaitForFences(device.internal_object(),
fences.len() as u32,
fences.as_ptr(),
vk::TRUE,
timeout_ns))?
}
} else {
return Ok(());
@ -177,7 +205,7 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
match r {
Success::Success => Ok(()),
Success::Timeout => Err(FenceWaitError::Timeout),
_ => unreachable!()
_ => unreachable!(),
}
}

@ -200,25 +228,32 @@ impl<D> Fence<D> where D: SafeDeref<Target = Device> {
/// - Panics if not all fences belong to the same device.
///
pub fn multi_reset<'a, I>(iter: I)
where I: IntoIterator<Item = &'a mut Fence<D>>, D: 'a
where I: IntoIterator<Item = &'a mut Fence<D>>,
D: 'a
{
let mut device: Option<&Device> = None;

let fences: SmallVec<[vk::Fence; 8]> = iter.into_iter().map(|fence| {
let fences: SmallVec<[vk::Fence; 8]> = iter.into_iter()
.map(|fence| {
match &mut device {
dev @ &mut None => *dev = Some(&*fence.device),
&mut Some(ref dev) if &**dev as *const Device == &*fence.device as *const Device => {},
_ => panic!("Tried to reset multiple fences that didn't belong to the same device"),
&mut Some(ref dev)
if &**dev as *const Device == &*fence.device as *const Device => {},
_ => panic!("Tried to reset multiple fences that didn't belong to the same \
device"),
};

fence.signaled.store(false, Ordering::Relaxed);
fence.fence
}).collect();
})
.collect();

if let Some(device) = device {
unsafe {
let vk = device.pointers();
vk.ResetFences(device.internal_object(), fences.len() as u32, fences.as_ptr());
vk.ResetFences(device.internal_object(),
fences.len() as u32,
fences.as_ptr());
}
}
}
@ -231,7 +266,9 @@ unsafe impl DeviceOwned for Fence {
}
}

unsafe impl<D> VulkanObject for Fence<D> where D: SafeDeref<Target = Device> {
unsafe impl<D> VulkanObject for Fence<D>
where D: SafeDeref<Target = Device>
{
type Object = vk::Fence;

#[inline]
@ -240,7 +277,9 @@ unsafe impl<D> VulkanObject for Fence<D> where D: SafeDeref<Target = Device> {
}
}

impl<D> Drop for Fence<D> where D: SafeDeref<Target = Device> {
impl<D> Drop for Fence<D>
where D: SafeDeref<Target = Device>
{
#[inline]
fn drop(&mut self) {
unsafe {
@ -277,7 +316,7 @@ impl error::Error for FenceWaitError {
fn cause(&self) -> Option<&error::Error> {
match *self {
FenceWaitError::OomError(ref err) => Some(err),
_ => None
_ => None,
}
}
}
@ -296,7 +335,7 @@ impl From<Error> for FenceWaitError {
Error::OutOfHostMemory => FenceWaitError::OomError(From::from(err)),
Error::OutOfDeviceMemory => FenceWaitError::OomError(From::from(err)),
Error::DeviceLost => FenceWaitError::DeviceLostError,
_ => panic!("Unexpected error value: {}", err as i32)
_ => panic!("Unexpected error value: {}", err as i32),
}
}
}
@ -348,7 +387,8 @@ mod tests {
let fence1 = Fence::signaled(device1.clone()).unwrap();
let fence2 = Fence::signaled(device2.clone()).unwrap();

let _ = Fence::multi_wait([&fence1, &fence2].iter().cloned(), Some(Duration::new(0, 10)));
let _ = Fence::multi_wait([&fence1, &fence2].iter().cloned(),
Some(Duration::new(0, 10)));
}

#[test]
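The guard reflowed in `multi_wait`/`multi_reset` above is a pointer-identity check: every fence must share one device, and that is verified by comparing raw pointers to the device rather than comparing the devices themselves. Below is a simplified sketch of the same check, using hypothetical stand-in `Device`/`Fence` types rather than vulkano's:

use std::sync::Arc;

// Stand-in types, not vulkano's.
struct Device;
struct Fence {
    device: Arc<Device>,
}

// Returns the device shared by every fence, panicking if they differ.
fn common_device<'a, I>(iter: I) -> Option<&'a Device>
    where I: IntoIterator<Item = &'a Fence>
{
    let mut device: Option<&Device> = None;
    for fence in iter {
        match device {
            None => device = Some(&*fence.device),
            // Same device: the raw pointers are equal, nothing to do.
            Some(dev) if dev as *const Device == &*fence.device as *const Device => {},
            _ => panic!("fences belong to different devices"),
        }
    }
    device
}

fn main() {
    let device = Arc::new(Device);
    let fences = [Fence { device: device.clone() }, Fence { device: device.clone() }];
    assert!(common_device(fences.iter()).is_some());
}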
Some files were not shown because too many files have changed in this diff.