hal/vk: instance and adapter

This commit is contained in:
Dzmitry Malyshau 2021-06-11 02:09:03 -04:00
parent 75b6ecea58
commit dc340421c4
12 changed files with 1925 additions and 14 deletions

37
Cargo.lock generated
View File

@ -60,6 +60,15 @@ dependencies = [
"serde",
]
[[package]]
name = "ash"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06063a002a77d2734631db74e8f4ce7148b77fe522e6bca46f2ae7774fd48112"
dependencies = [
"libloading 0.7.0",
]
[[package]]
name = "async-executor"
version = "1.4.1"
@ -517,7 +526,7 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b11f15d1e3268f140f68d390637d5e76d849782d971ae7063e0da69fe9709a76"
dependencies = [
"libloading",
"libloading 0.6.5",
]
[[package]]
@ -741,6 +750,12 @@ dependencies = [
"hashbrown",
]
[[package]]
name = "inplace_it"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90953f308a79fe6d62a4643e51f848fbfddcd05975a38e69fdf4ab86a7baf7ca"
[[package]]
name = "instant"
version = "0.1.8"
@ -824,6 +839,16 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "libloading"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a"
dependencies = [
"cfg-if 1.0.0",
"winapi 0.3.9",
]
[[package]]
name = "lock_api"
version = "0.4.1"
@ -1435,6 +1460,12 @@ version = "0.6.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189"
[[package]]
name = "renderdoc-sys"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1382d1f0a252c4bf97dc20d979a2fdd05b024acd7c2ed0f7595d7817666a157"
[[package]]
name = "ron"
version = "0.6.2"
@ -1915,16 +1946,20 @@ name = "wgpu-hal"
version = "0.1.0"
dependencies = [
"arrayvec",
"ash",
"bitflags",
"block",
"foreign-types",
"fxhash",
"inplace_it",
"libloading 0.7.0",
"log",
"metal",
"naga",
"objc",
"parking_lot",
"raw-window-handle",
"renderdoc-sys",
"thiserror",
"wgpu-types",
"winit",

View File

@ -48,7 +48,6 @@ version = "0.8"
path = "../wgpu-hal"
package = "wgpu-hal"
version = "0.1"
features = ["empty"]
[target.'cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies]
hal = { path = "../wgpu-hal", package = "wgpu-hal", features = ["metal"] }

View File

@ -13,24 +13,30 @@ license = "MIT OR Apache-2.0"
[features]
default = []
empty = []
metal = ["block", "foreign-types", "mtl", "objc", "parking_lot", "naga/msl-out"]
metal = ["naga/msl-out", "block", "foreign-types", "mtl", "objc", "parking_lot"]
vulkan = ["naga/spv-out", "ash", "libloading", "inplace_it", "renderdoc-sys"]
[dependencies]
arrayvec = "0.5"
bitflags = "1.0"
fxhash = "0.2.1"
log = "0.4"
parking_lot = { version = "0.11", optional = true }
raw-window-handle = "0.3"
thiserror = "1"
wgt = { package = "wgpu-types", path = "../wgpu-types" }
# backends
# backends common
arrayvec = "0.5"
fxhash = "0.2.1"
libloading = { version = "0.7", optional = true }
log = "0.4"
parking_lot = { version = "0.11", optional = true }
# backend: Metal
block = { version = "0.1", optional = true }
foreign-types = { version = "0.3", optional = true }
mtl = { package = "metal", version = "0.22", optional = true }
objc = { version = "0.2.5", optional = true }
# backend: Vulkan
ash = { version = "0.32", optional = true }
inplace_it = { version = "0.3.3", optional = true }
renderdoc-sys = { version = "0.7.1", optional = true }
[dependencies.naga]
git = "https://github.com/gfx-rs/naga"

View File

@ -1,3 +1,11 @@
pub mod db {
pub mod intel {
pub const VENDOR: u32 = 0x8086;
pub const DEVICE_KABY_LAKE_MASK: u32 = 0x5900;
pub const DEVICE_SKY_LAKE_MASK: u32 = 0x1900;
}
}
pub fn map_naga_stage(stage: naga::ShaderStage) -> wgt::ShaderStage {
match stage {
naga::ShaderStage::Vertex => wgt::ShaderStage::VERTEX,

View File

@ -37,7 +37,7 @@ impl crate::Api for Api {
}
impl crate::Instance<Api> for Context {
unsafe fn init() -> Result<Self, crate::InstanceError> {
unsafe fn init(desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
Ok(Context)
}
unsafe fn create_surface(

View File

@ -41,13 +41,13 @@
)]
pub mod aux;
#[cfg(feature = "empty")]
mod empty;
#[cfg(feature = "metal")]
mod metal;
#[cfg(feature = "vulkan")]
mod vulkan;
pub mod api {
#[cfg(feature = "empty")]
pub use super::empty::Api as Empty;
#[cfg(feature = "metal")]
pub use super::metal::Api as Metal;
@ -143,7 +143,7 @@ pub trait Api: Clone + Sized {
}
pub trait Instance<A: Api>: Sized + Send + Sync {
unsafe fn init() -> Result<Self, InstanceError>;
unsafe fn init(desc: &InstanceDescriptor) -> Result<Self, InstanceError>;
unsafe fn create_surface(
&self,
rwh: &impl raw_window_handle::HasRawWindowHandle,
@ -452,6 +452,16 @@ pub trait CommandBuffer<A: Api>: Send + Sync {
unsafe fn dispatch_indirect(&mut self, buffer: &A::Buffer, offset: wgt::BufferAddress);
}
bitflags!(
/// Instance initialization flags.
pub struct InstanceFlag: u32 {
/// Generate debug information in shaders and objects.
const DEBUG = 0x1;
/// Enable validation, if possible.
const VALIDATION = 0x2;
}
);
bitflags!(
/// Texture format capability flags.
pub struct TextureFormatCapability: u32 {
@ -574,6 +584,12 @@ bitflags::bitflags! {
}
}
#[derive(Clone, Debug)]
pub struct InstanceDescriptor<'a> {
name: &'a str,
flags: InstanceFlag,
}
#[derive(Clone, Debug)]
pub struct Alignments {
/// The alignment of the start of the buffer used as a GPU copy source.

View File

@ -48,9 +48,11 @@ impl crate::Api for Api {
pub struct Instance {}
impl crate::Instance<Api> for Instance {
unsafe fn init() -> Result<Self, crate::InstanceError> {
unsafe fn init(_desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
//TODO: enable `METAL_DEVICE_WRAPPER_TYPE` environment based on the flags?
Ok(Instance {})
}
unsafe fn create_surface(
&self,
has_handle: &impl raw_window_handle::HasRawWindowHandle,

View File

@ -0,0 +1,846 @@
use super::conv;
use ash::{
extensions::khr,
version::{DeviceV1_0, InstanceV1_0},
vk,
};
use std::{ffi::CStr, mem, ptr, sync::Arc};
const fn indexing_features() -> wgt::Features {
wgt::Features::UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING
| wgt::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING
| wgt::Features::STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING
}
/// Aggregate of the `vk::PhysicalDevice*Features` structs used by `gfx`.
#[derive(Debug, Default)]
pub struct PhysicalDeviceFeatures {
core: vk::PhysicalDeviceFeatures,
vulkan_1_2: Option<vk::PhysicalDeviceVulkan12Features>,
descriptor_indexing: Option<vk::PhysicalDeviceDescriptorIndexingFeaturesEXT>,
imageless_framebuffer: Option<vk::PhysicalDeviceImagelessFramebufferFeaturesKHR>,
}
// This is safe because the structs have `p_next: *mut c_void`, which we null out/never read.
unsafe impl Send for PhysicalDeviceFeatures {}
unsafe impl Sync for PhysicalDeviceFeatures {}
impl PhysicalDeviceFeatures {
/// Add the members of `self` into `info.enabled_features` and its `p_next` chain.
fn add_to_device_create_builder<'a>(&'a mut self, info: &mut vk::DeviceCreateInfoBuilder<'a>) {
*info = info.enabled_features(&self.core);
if let Some(ref mut feature) = self.vulkan_1_2 {
*info = info.push_next(feature);
}
if let Some(ref mut feature) = self.descriptor_indexing {
*info = info.push_next(feature);
}
if let Some(ref mut feature) = self.imageless_framebuffer {
*info = info.push_next(feature);
}
}
/// Create a `PhysicalDeviceFeatures` that will be used to create a logical device.
///
/// `requested_features` should be the same as what was used to generate `enabled_extensions`.
fn from_extensions_and_requested_features(
api_version: u32,
enabled_extensions: &[&'static CStr],
requested_features: wgt::Features,
downlevel_flags: wgt::DownlevelFlags,
supports_vulkan12_imageless_framebuffer: bool,
) -> Self {
// This must follow the "Valid Usage" requirements of [`VkDeviceCreateInfo`](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkDeviceCreateInfo.html).
Self {
// vk::PhysicalDeviceFeatures is a struct composed of Bool32's while
// Features is a bitfield so we need to map everything manually
core: vk::PhysicalDeviceFeatures::builder()
.robust_buffer_access(true) //TODO: make configurable
.independent_blend(true)
.sample_rate_shading(true)
.image_cube_array(
downlevel_flags.contains(wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES),
)
//.dual_src_blend(requested_features.contains(wgt::Features::DUAL_SRC_BLENDING))
.multi_draw_indirect(
requested_features.contains(wgt::Features::MULTI_DRAW_INDIRECT),
)
.depth_clamp(requested_features.contains(wgt::Features::DEPTH_CLAMPING))
.fill_mode_non_solid(
requested_features.contains(wgt::Features::NON_FILL_POLYGON_MODE),
)
//.depth_bounds(requested_features.contains(wgt::Features::DEPTH_BOUNDS))
//.alpha_to_one(requested_features.contains(wgt::Features::ALPHA_TO_ONE))
//.multi_viewport(requested_features.contains(wgt::Features::MULTI_VIEWPORTS))
.sampler_anisotropy(
downlevel_flags.contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING),
)
.texture_compression_etc2(
requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_ETC2),
)
.texture_compression_astc_ldr(
requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_ASTC_LDR),
)
.texture_compression_bc(
requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_BC),
)
//.occlusion_query_precise(requested_features.contains(wgt::Features::PRECISE_OCCLUSION_QUERY))
.pipeline_statistics_query(
requested_features.contains(wgt::Features::PIPELINE_STATISTICS_QUERY),
)
.vertex_pipeline_stores_and_atomics(
requested_features.contains(wgt::Features::VERTEX_WRITABLE_STORAGE),
)
.fragment_stores_and_atomics(
downlevel_flags.contains(wgt::DownlevelFlags::STORAGE_IMAGES),
)
//.shader_image_gather_extended(
//.shader_storage_image_extended_formats(
.shader_uniform_buffer_array_dynamic_indexing(
requested_features
.contains(wgt::Features::UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING),
)
.shader_sampled_image_array_dynamic_indexing(
requested_features
.contains(wgt::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING),
)
.shader_storage_buffer_array_dynamic_indexing(
requested_features
.contains(wgt::Features::STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING),
)
//.shader_storage_image_array_dynamic_indexing(
//.shader_clip_distance(requested_features.contains(wgt::Features::SHADER_CLIP_DISTANCE))
//.shader_cull_distance(requested_features.contains(wgt::Features::SHADER_CULL_DISTANCE))
.shader_float64(requested_features.contains(wgt::Features::SHADER_FLOAT64))
//.shader_int64(requested_features.contains(wgt::Features::SHADER_INT64))
//.shader_int16(requested_features.contains(wgt::Features::SHADER_INT16))
//.shader_resource_residency(requested_features.contains(wgt::Features::SHADER_RESOURCE_RESIDENCY))
.build(),
vulkan_1_2: if api_version >= vk::API_VERSION_1_2 {
Some(
vk::PhysicalDeviceVulkan12Features::builder()
//.sampler_mirror_clamp_to_edge(requested_features.contains(wgt::Features::SAMPLER_MIRROR_CLAMP_EDGE))
.draw_indirect_count(
requested_features.contains(wgt::Features::MULTI_DRAW_INDIRECT_COUNT),
)
.descriptor_indexing(requested_features.intersects(indexing_features()))
.shader_sampled_image_array_non_uniform_indexing(
requested_features.contains(
wgt::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
),
)
//.shader_storage_image_array_non_uniform_indexing(
//.shader_storage_buffer_array_non_uniform_indexing(
.shader_uniform_buffer_array_non_uniform_indexing(
requested_features
.contains(wgt::Features::UNIFORM_BUFFER_ARRAY_NON_UNIFORM_INDEXING),
)
.runtime_descriptor_array(
requested_features.contains(wgt::Features::UNSIZED_BINDING_ARRAY),
)
//.sampler_filter_minmax(requested_features.contains(wgt::Features::SAMPLER_REDUCTION))
.imageless_framebuffer(supports_vulkan12_imageless_framebuffer)
.build(),
)
} else {
None
},
descriptor_indexing: if enabled_extensions
.contains(&vk::ExtDescriptorIndexingFn::name())
{
Some(
vk::PhysicalDeviceDescriptorIndexingFeaturesEXT::builder()
.shader_sampled_image_array_non_uniform_indexing(
requested_features.contains(
wgt::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
),
)
//.shader_storage_image_array_non_uniform_indexing(
//.shader_storage_buffer_array_non_uniform_indexing(
.shader_uniform_buffer_array_non_uniform_indexing(
requested_features
.contains(wgt::Features::UNIFORM_BUFFER_ARRAY_NON_UNIFORM_INDEXING),
)
.runtime_descriptor_array(
requested_features.contains(wgt::Features::UNSIZED_BINDING_ARRAY),
)
.build(),
)
} else {
None
},
imageless_framebuffer: if enabled_extensions
.contains(&vk::KhrImagelessFramebufferFn::name())
{
Some(
vk::PhysicalDeviceImagelessFramebufferFeaturesKHR::builder()
.imageless_framebuffer(true)
.build(),
)
} else {
None
},
}
}
fn to_wgpu(&self, caps: &PhysicalDeviceCapabilities) -> (wgt::Features, wgt::DownlevelFlags) {
use wgt::{DownlevelFlags as Df, Features as F};
let mut features = F::empty()
| F::ADDRESS_MODE_CLAMP_TO_BORDER
| F::SAMPLED_TEXTURE_BINDING_ARRAY
| F::BUFFER_BINDING_ARRAY;
let mut dl_flags = Df::COMPARISON_SAMPLERS;
dl_flags.set(Df::CUBE_ARRAY_TEXTURES, self.core.image_cube_array != 0);
dl_flags.set(Df::ANISOTROPIC_FILTERING, self.core.sampler_anisotropy != 0);
dl_flags.set(
Df::STORAGE_IMAGES,
self.core.fragment_stores_and_atomics != 0,
);
//if self.core.dual_src_blend != 0
features.set(F::MULTI_DRAW_INDIRECT, self.core.multi_draw_indirect != 0);
features.set(F::DEPTH_CLAMPING, self.core.depth_clamp != 0);
features.set(F::NON_FILL_POLYGON_MODE, self.core.fill_mode_non_solid != 0);
//if self.core.depth_bounds != 0 {
//if self.core.alpha_to_one != 0 {
//if self.core.multi_viewport != 0 {
features.set(
F::TEXTURE_COMPRESSION_ETC2,
self.core.texture_compression_etc2 != 0,
);
features.set(
F::TEXTURE_COMPRESSION_ASTC_LDR,
self.core.texture_compression_astc_ldr != 0,
);
features.set(
F::TEXTURE_COMPRESSION_BC,
self.core.texture_compression_bc != 0,
);
//if self.core.occlusion_query_precise != 0 {
//if self.core.pipeline_statistics_query != 0 { //TODO
features.set(
F::VERTEX_WRITABLE_STORAGE,
self.core.vertex_pipeline_stores_and_atomics != 0,
);
//if self.core.shader_image_gather_extended != 0 {
//if self.core.shader_storage_image_extended_formats != 0 {
features.set(
F::UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING,
self.core.shader_uniform_buffer_array_dynamic_indexing != 0,
);
features.set(
F::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING,
self.core.shader_sampled_image_array_dynamic_indexing != 0,
);
features.set(
F::STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING,
self.core.shader_storage_buffer_array_dynamic_indexing != 0,
);
//if self.core.shader_storage_image_array_dynamic_indexing != 0 {
//if self.core.shader_clip_distance != 0 {
//if self.core.shader_cull_distance != 0 {
features.set(F::SHADER_FLOAT64, self.core.shader_float64 != 0);
//if self.core.shader_int64 != 0 {
//if self.core.shader_int16 != 0 {
//if caps.supports_extension(vk::KhrSamplerMirrorClampToEdgeFn::name()) {
//if caps.supports_extension(vk::ExtSamplerFilterMinmaxFn::name()) {
features.set(
F::MULTI_DRAW_INDIRECT_COUNT,
caps.supports_extension(khr::DrawIndirectCount::name()),
);
features.set(
F::CONSERVATIVE_RASTERIZATION,
caps.supports_extension(vk::ExtConservativeRasterizationFn::name()),
);
if let Some(ref vulkan_1_2) = self.vulkan_1_2 {
if vulkan_1_2.shader_sampled_image_array_non_uniform_indexing != 0 {
features |= F::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING;
}
//if vulkan_1_2.shader_storage_image_array_non_uniform_indexing != 0 {
//if vulkan_1_2.shader_storage_buffer_array_non_uniform_indexing != 0 {
if vulkan_1_2.shader_uniform_buffer_array_non_uniform_indexing != 0 {
features |= F::UNIFORM_BUFFER_ARRAY_NON_UNIFORM_INDEXING;
}
if vulkan_1_2.runtime_descriptor_array != 0 {
features |= F::UNSIZED_BINDING_ARRAY;
}
//if vulkan_1_2.sampler_mirror_clamp_to_edge != 0 {
//if vulkan_1_2.sampler_filter_minmax != 0 {
if vulkan_1_2.draw_indirect_count != 0 {
features |= F::MULTI_DRAW_INDIRECT_COUNT;
}
}
if let Some(ref descriptor_indexing) = self.descriptor_indexing {
if descriptor_indexing.shader_sampled_image_array_non_uniform_indexing != 0 {
features |= F::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING;
}
//if descriptor_indexing.shader_storage_image_array_non_uniform_indexing != 0 {
//if descriptor_indexing.shader_storage_buffer_array_non_uniform_indexing != 0 {
if descriptor_indexing.shader_uniform_buffer_array_non_uniform_indexing != 0 {
features |= F::UNIFORM_BUFFER_ARRAY_NON_UNIFORM_INDEXING;
}
if descriptor_indexing.runtime_descriptor_array != 0 {
features |= F::UNSIZED_BINDING_ARRAY;
}
}
(features, dl_flags)
}
}
/// Information gathered about a physical device capabilities.
pub struct PhysicalDeviceCapabilities {
supported_extensions: Vec<vk::ExtensionProperties>,
properties: vk::PhysicalDeviceProperties,
}
impl PhysicalDeviceCapabilities {
fn supports_extension(&self, extension: &CStr) -> bool {
self.supported_extensions
.iter()
.any(|ep| unsafe { CStr::from_ptr(ep.extension_name.as_ptr()) } == extension)
}
/// Map `requested_features` to the list of Vulkan extension strings required to create the logical device.
fn get_required_extensions(&self, requested_features: wgt::Features) -> Vec<&'static CStr> {
let mut extensions = Vec::new();
extensions.push(khr::Swapchain::name());
if self.properties.api_version < vk::API_VERSION_1_1 {
extensions.push(vk::KhrMaintenance1Fn::name());
extensions.push(vk::KhrMaintenance2Fn::name());
// `VK_AMD_negative_viewport_height` is obsoleted by `VK_KHR_maintenance1` and must not be enabled alongside `VK_KHR_maintenance1` or a 1.1+ device.
if !self.supports_extension(vk::KhrMaintenance1Fn::name()) {
extensions.push(vk::AmdNegativeViewportHeightFn::name());
}
}
if self.properties.api_version < vk::API_VERSION_1_2 {
if self.supports_extension(vk::KhrImagelessFramebufferFn::name()) {
extensions.push(vk::KhrImagelessFramebufferFn::name());
extensions.push(vk::KhrImageFormatListFn::name()); // Required for `KhrImagelessFramebufferFn`
}
extensions.push(vk::ExtSamplerFilterMinmaxFn::name());
if requested_features.intersects(indexing_features()) {
extensions.push(vk::ExtDescriptorIndexingFn::name());
if self.properties.api_version < vk::API_VERSION_1_1 {
extensions.push(vk::KhrMaintenance3Fn::name());
}
}
//extensions.push(vk::KhrSamplerMirrorClampToEdgeFn::name());
//extensions.push(vk::ExtSamplerFilterMinmaxFn::name());
if requested_features.contains(wgt::Features::MULTI_DRAW_INDIRECT_COUNT) {
extensions.push(khr::DrawIndirectCount::name());
}
}
if requested_features.contains(wgt::Features::CONSERVATIVE_RASTERIZATION) {
extensions.push(vk::ExtConservativeRasterizationFn::name());
}
extensions
}
fn to_wgpu_limits(&self) -> wgt::Limits {
let limits = &self.properties.limits;
wgt::Limits {
max_texture_dimension_1d: limits.max_image_dimension1_d,
max_texture_dimension_2d: limits.max_image_dimension2_d,
max_texture_dimension_3d: limits.max_image_dimension3_d,
max_texture_array_layers: limits.max_image_array_layers,
max_bind_groups: limits
.max_bound_descriptor_sets
.min(crate::MAX_BIND_GROUPS as u32),
max_dynamic_uniform_buffers_per_pipeline_layout: limits
.max_descriptor_set_uniform_buffers_dynamic,
max_dynamic_storage_buffers_per_pipeline_layout: limits
.max_descriptor_set_storage_buffers_dynamic,
max_sampled_textures_per_shader_stage: limits.max_per_stage_descriptor_sampled_images,
max_samplers_per_shader_stage: limits.max_per_stage_descriptor_samplers,
max_storage_buffers_per_shader_stage: limits.max_per_stage_descriptor_storage_buffers,
max_storage_textures_per_shader_stage: limits.max_per_stage_descriptor_storage_images,
max_uniform_buffers_per_shader_stage: limits.max_per_stage_descriptor_uniform_buffers,
max_uniform_buffer_binding_size: limits.max_uniform_buffer_range,
max_storage_buffer_binding_size: limits.max_storage_buffer_range,
max_vertex_buffers: limits
.max_vertex_input_bindings
.min(crate::MAX_VERTEX_BUFFERS as u32),
max_vertex_attributes: limits.max_vertex_input_attributes,
max_vertex_buffer_array_stride: limits.max_vertex_input_binding_stride,
max_push_constant_size: limits.max_push_constants_size,
}
}
fn to_hal_alignments(&self) -> crate::Alignments {
let limits = &self.properties.limits;
crate::Alignments {
buffer_copy_offset: wgt::BufferSize::new(limits.optimal_buffer_copy_offset_alignment)
.unwrap(),
buffer_copy_pitch: wgt::BufferSize::new(limits.optimal_buffer_copy_row_pitch_alignment)
.unwrap(),
storage_buffer_offset: wgt::BufferSize::new(limits.min_storage_buffer_offset_alignment)
.unwrap(),
uniform_buffer_offset: wgt::BufferSize::new(limits.min_uniform_buffer_offset_alignment)
.unwrap(),
}
}
}
impl super::InstanceShared {
#[allow(trivial_casts)] // false positives
fn inspect(
&self,
phd: vk::PhysicalDevice,
) -> (PhysicalDeviceCapabilities, PhysicalDeviceFeatures) {
let capabilities = unsafe {
PhysicalDeviceCapabilities {
supported_extensions: self.raw.enumerate_device_extension_properties(phd).unwrap(),
properties: self.raw.get_physical_device_properties(phd),
}
};
let mut features = PhysicalDeviceFeatures::default();
features.core = if let Some(ref get_device_properties) = self.get_physical_device_properties
{
let core = vk::PhysicalDeviceFeatures::builder().build();
let mut features2 = vk::PhysicalDeviceFeatures2KHR::builder()
.features(core)
.build();
if capabilities.properties.api_version >= vk::API_VERSION_1_2 {
features.vulkan_1_2 = Some(vk::PhysicalDeviceVulkan12Features::builder().build());
let mut_ref = features.vulkan_1_2.as_mut().unwrap();
mut_ref.p_next = mem::replace(&mut features2.p_next, mut_ref as *mut _ as *mut _);
}
if capabilities.supports_extension(vk::ExtDescriptorIndexingFn::name()) {
features.descriptor_indexing =
Some(vk::PhysicalDeviceDescriptorIndexingFeaturesEXT::builder().build());
let mut_ref = features.descriptor_indexing.as_mut().unwrap();
mut_ref.p_next = mem::replace(&mut features2.p_next, mut_ref as *mut _ as *mut _);
}
// `VK_KHR_imageless_framebuffer` is promoted to 1.2, but has no changes, so we can keep using the extension unconditionally.
if capabilities.supports_extension(vk::KhrImagelessFramebufferFn::name()) {
features.imageless_framebuffer =
Some(vk::PhysicalDeviceImagelessFramebufferFeaturesKHR::builder().build());
let mut_ref = features.imageless_framebuffer.as_mut().unwrap();
mut_ref.p_next = mem::replace(&mut features2.p_next, mut_ref as *mut _ as *mut _);
}
unsafe {
get_device_properties.get_physical_device_features2_khr(phd, &mut features2);
}
features2.features
} else {
unsafe { self.raw.get_physical_device_features(phd) }
};
/// # Safety
/// `T` must be a struct bigger than `vk::BaseOutStructure`.
unsafe fn null_p_next<T>(features: &mut Option<T>) {
if let Some(features) = features {
// This is technically invalid since `vk::BaseOutStructure` and `T` will probably never have the same size.
mem::transmute::<_, &mut vk::BaseOutStructure>(features).p_next = ptr::null_mut();
}
}
unsafe {
null_p_next(&mut features.vulkan_1_2);
null_p_next(&mut features.descriptor_indexing);
null_p_next(&mut features.imageless_framebuffer);
}
(capabilities, features)
}
}
impl super::Instance {
pub(super) fn expose_adapter(
&self,
phd: vk::PhysicalDevice,
) -> Option<crate::ExposedAdapter<super::Api>> {
let (phd_capabilities, phd_features) = self.shared.inspect(phd);
let info = wgt::AdapterInfo {
name: unsafe {
CStr::from_ptr(phd_capabilities.properties.device_name.as_ptr())
.to_str()
.unwrap_or("?")
.to_owned()
},
vendor: phd_capabilities.properties.vendor_id as usize,
device: phd_capabilities.properties.device_id as usize,
device_type: match phd_capabilities.properties.device_type {
ash::vk::PhysicalDeviceType::OTHER => wgt::DeviceType::Other,
ash::vk::PhysicalDeviceType::INTEGRATED_GPU => wgt::DeviceType::IntegratedGpu,
ash::vk::PhysicalDeviceType::DISCRETE_GPU => wgt::DeviceType::DiscreteGpu,
ash::vk::PhysicalDeviceType::VIRTUAL_GPU => wgt::DeviceType::VirtualGpu,
ash::vk::PhysicalDeviceType::CPU => wgt::DeviceType::Cpu,
_ => wgt::DeviceType::Other,
},
backend: wgt::Backend::Vulkan,
};
let (mut available_features, downlevel_flags) = phd_features.to_wgpu(&phd_capabilities);
{
use crate::aux::db;
// see https://github.com/gfx-rs/gfx/issues/1930
let _is_windows_intel_dual_src_bug = cfg!(windows)
&& phd_capabilities.properties.vendor_id == db::intel::VENDOR
&& (phd_capabilities.properties.device_id & db::intel::DEVICE_KABY_LAKE_MASK
== db::intel::DEVICE_KABY_LAKE_MASK
|| phd_capabilities.properties.device_id & db::intel::DEVICE_SKY_LAKE_MASK
== db::intel::DEVICE_SKY_LAKE_MASK);
};
if phd_features.core.sample_rate_shading == 0 {
log::error!(
"sample_rate_shading feature is not supported, hiding the adapter: {}",
info.name
);
return None;
}
if !phd_capabilities.supports_extension(vk::AmdNegativeViewportHeightFn::name())
&& !phd_capabilities.supports_extension(vk::KhrMaintenance1Fn::name())
&& phd_capabilities.properties.api_version < vk::API_VERSION_1_2
{
log::error!(
"viewport Y-flip is not supported, hiding the adapter: {}",
info.name
);
return None;
}
let queue_families = unsafe {
self.shared
.raw
.get_physical_device_queue_family_properties(phd)
};
let adapter = super::Adapter {
raw: phd,
instance: Arc::clone(&self.shared),
queue_families,
known_memory_flags: vk::MemoryPropertyFlags::DEVICE_LOCAL
| vk::MemoryPropertyFlags::HOST_VISIBLE
| vk::MemoryPropertyFlags::HOST_COHERENT
| vk::MemoryPropertyFlags::HOST_CACHED
| vk::MemoryPropertyFlags::LAZILY_ALLOCATED,
phd_capabilities,
phd_features,
available_features,
downlevel_flags,
};
let capabilities = crate::Capabilities {
limits: phd_capabilities.to_wgpu_limits(),
alignments: phd_capabilities.to_hal_alignments(),
downlevel: wgt::DownlevelCapabilities {
flags: downlevel_flags,
shader_model: wgt::ShaderModel::Sm5, //TODO?
},
};
Some(crate::ExposedAdapter {
adapter,
info,
features: available_features,
capabilities,
})
}
}
impl crate::Adapter<super::Api> for super::Adapter {
unsafe fn open(
&self,
features: wgt::Features,
) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
let enabled_extensions = {
let (supported_extensions, unsupported_extensions) = self
.phd_capabilities
.get_required_extensions(features)
.iter()
.partition::<Vec<&CStr>, _>(|&&extension| {
self.phd_capabilities.supports_extension(extension)
});
if !unsupported_extensions.is_empty() {
log::warn!("Missing extensions: {:?}", unsupported_extensions);
}
log::debug!("Supported extensions: {:?}", supported_extensions);
supported_extensions
};
let valid_ash_memory_types = {
let mem_properties = self
.instance
.raw
.get_physical_device_memory_properties(self.raw);
mem_properties.memory_types[..mem_properties.memory_type_count as usize]
.iter()
.enumerate()
.fold(0, |u, (i, mem)| {
if self.known_memory_flags.contains(mem.property_flags) {
u | (1 << i)
} else {
u
}
})
};
let supports_vulkan12_imageless_framebuffer = self
.phd_features
.vulkan_1_2
.map_or(false, |features| features.imageless_framebuffer == vk::TRUE);
// Create device
let raw_device = {
let family_info = vk::DeviceQueueCreateInfo::builder()
.flags(vk::DeviceQueueCreateFlags::empty())
.build();
let family_infos = [family_info];
let str_pointers = enabled_extensions
.iter()
.map(|&s| {
// Safe because `enabled_extensions` entries have static lifetime.
s.as_ptr()
})
.collect::<Vec<_>>();
let enabled_phd_features =
PhysicalDeviceFeatures::from_extensions_and_requested_features(
self.phd_capabilities.properties.api_version,
&enabled_extensions,
features,
self.downlevel_flags,
supports_vulkan12_imageless_framebuffer,
);
let mut info = vk::DeviceCreateInfo::builder()
.queue_create_infos(&family_infos)
.enabled_extension_names(&str_pointers);
enabled_phd_features.add_to_device_create_builder(&mut info);
self.instance.raw.create_device(self.raw, &info, None)?
};
let swapchain_fn = khr::Swapchain::new(&self.instance.raw, &raw_device);
let indirect_count_fn = if enabled_extensions.contains(&khr::DrawIndirectCount::name()) {
Some(super::ExtensionFn::Extension(khr::DrawIndirectCount::new(
&self.instance.raw,
&raw_device,
)))
} else if self.phd_capabilities.properties.api_version >= vk::API_VERSION_1_2 {
Some(super::ExtensionFn::Promoted)
} else {
None
};
let naga_options = {
use naga::back::spv;
let capabilities = [
spv::Capability::Shader,
spv::Capability::Matrix,
spv::Capability::InputAttachment,
spv::Capability::Sampled1D,
spv::Capability::Image1D,
spv::Capability::SampledBuffer,
spv::Capability::ImageBuffer,
spv::Capability::ImageQuery,
spv::Capability::DerivativeControl,
//TODO: fill out the rest
];
let mut flags = spv::WriterFlags::empty();
flags.set(
spv::WriterFlags::DEBUG,
self.instance.flags.contains(crate::InstanceFlag::DEBUG),
);
spv::Options {
lang_version: (1, 0),
flags,
capabilities: Some(capabilities.iter().cloned().collect()),
}
};
let queue = super::Queue {
//TODO: make this nicer
raw: raw_device.get_device_queue(0, 0),
swapchain_fn,
};
let device = super::Device {
shared: Arc::new(super::DeviceShared {
raw: raw_device,
instance: Arc::clone(&self.instance),
extension_fns: super::DeviceExtensionFunctions {
draw_indirect_count: indirect_count_fn,
},
features,
vendor_id: self.phd_capabilities.properties.vendor_id,
flip_y_requires_shift: self.phd_capabilities.properties.api_version
>= vk::API_VERSION_1_1
|| self
.phd_capabilities
.supports_extension(vk::KhrMaintenance1Fn::name()),
imageless_framebuffers: supports_vulkan12_imageless_framebuffer
|| self
.phd_capabilities
.supports_extension(vk::KhrImagelessFramebufferFn::name()),
image_view_usage: self.phd_capabilities.properties.api_version
>= vk::API_VERSION_1_1
|| self
.phd_capabilities
.supports_extension(vk::KhrMaintenance2Fn::name()),
timestamp_period: self.phd_capabilities.properties.limits.timestamp_period,
}),
valid_ash_memory_types,
naga_options,
};
Ok(crate::OpenDevice { device, queue })
}
unsafe fn close(&self, device: super::Device) {
device.shared.raw.destroy_device(None);
}
unsafe fn texture_format_capabilities(
&self,
format: wgt::TextureFormat,
) -> crate::TextureFormatCapability {
crate::TextureFormatCapability::empty()
}
unsafe fn surface_capabilities(
&self,
surface: &super::Surface,
) -> Option<crate::SurfaceCapabilities> {
let queue_family_index = 0; //TODO
match surface.functor.get_physical_device_surface_support(
self.raw,
queue_family_index,
surface.raw,
) {
Ok(true) => (),
Ok(false) => return None,
Err(e) => {
log::error!("get_physical_device_surface_support: {}", e);
return None;
}
}
let caps = match surface
.functor
.get_physical_device_surface_capabilities(self.raw, surface.raw)
{
Ok(caps) => caps,
Err(e) => {
log::error!("get_physical_device_surface_capabilities: {}", e);
return None;
}
};
// If image count is 0, the support number of images is unlimited.
let max_image_count = if caps.max_image_count == 0 {
!0
} else {
caps.max_image_count
};
// `0xFFFFFFFF` indicates that the extent depends on the created swapchain.
let current_extent = if caps.current_extent.width != !0 && caps.current_extent.height != !0
{
Some(wgt::Extent3d {
width: caps.current_extent.width,
height: caps.current_extent.height,
depth_or_array_layers: 1,
})
} else {
None
};
let min_extent = wgt::Extent3d {
width: caps.min_image_extent.width,
height: caps.min_image_extent.height,
depth_or_array_layers: 1,
};
let max_extent = wgt::Extent3d {
width: caps.max_image_extent.width,
height: caps.max_image_extent.height,
depth_or_array_layers: caps.max_image_array_layers,
};
let raw_present_modes = match surface
.functor
.get_physical_device_surface_present_modes(self.raw, surface.raw)
{
Ok(present_modes) => present_modes,
Err(e) => {
log::error!("get_physical_device_surface_present_modes: {}", e);
Vec::new()
}
};
let raw_surface_formats = match surface
.functor
.get_physical_device_surface_formats(self.raw, surface.raw)
{
Ok(formats) => formats,
Err(e) => {
log::error!("get_physical_device_surface_formats: {}", e);
Vec::new()
}
};
let supported_formats = [
wgt::TextureFormat::Rgba8Unorm,
wgt::TextureFormat::Rgba8UnormSrgb,
wgt::TextureFormat::Bgra8Unorm,
wgt::TextureFormat::Bgra8UnormSrgb,
];
let formats = supported_formats
.iter()
.cloned()
.filter(|format| {
let vk_format = conv::map_texture_format(format);
raw_surface_formats
.iter()
.any(|sf| sf.format == vk_format || sf.format == vk::Format::UNDEFINED)
})
.collect();
Some(crate::SurfaceCapabilities {
formats,
swap_chain_sizes: 1..=max_image_count,
current_extent,
extents: min_extent..=max_extent,
usage: conv::map_vk_image_usage(caps.supported_usage_flags),
present_modes: raw_present_modes
.into_iter()
.map(conv::map_vk_present_mode)
.collect(),
composite_alpha_modes: conv::map_vk_composite_alpha(caps.supported_composite_alpha),
})
}
}

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,532 @@
use std::{
cmp,
ffi::{c_void, CStr, CString},
mem,
sync::Arc,
};
use ash::{
extensions::{ext, khr},
version::{DeviceV1_0 as _, EntryV1_0 as _, InstanceV1_0 as _},
vk,
};
impl super::Swapchain {
    /// Releases GPU-side dependencies of this swapchain so it can be safely
    /// destroyed or passed along as an `old_swapchain`: waits for the device
    /// to go idle, then destroys the acquisition fence. Returns `self` so the
    /// caller keeps the raw handle.
    unsafe fn release_resources(self, device: &ash::Device) -> Self {
        // Best-effort wait; the result is deliberately ignored since we are
        // tearing down regardless.
        drop(device.device_wait_idle());
        device.destroy_fence(self.fence, None);
        self
    }
}
impl super::Instance {
    /// Creates a `Surface` from an Xlib display/window pair.
    ///
    /// # Panics
    /// Panics if the instance was created without `VK_KHR_xlib_surface`.
    fn create_surface_from_xlib(
        &self,
        dpy: *mut vk::Display,
        window: vk::Window,
    ) -> super::Surface {
        if !self.extensions.contains(&khr::XlibSurface::name()) {
            panic!("Vulkan driver does not support VK_KHR_XLIB_SURFACE");
        }
        let surface = {
            let xlib_loader = khr::XlibSurface::new(&self.entry, &self.shared.raw);
            let info = vk::XlibSurfaceCreateInfoKHR::builder()
                .flags(vk::XlibSurfaceCreateFlagsKHR::empty())
                .window(window)
                .dpy(dpy);
            unsafe { xlib_loader.create_xlib_surface(&info, None) }
                .expect("XlibSurface::create_xlib_surface() failed")
        };
        self.create_surface_from_vk_surface_khr(surface)
    }

    /// Creates a `Surface` from an XCB connection/window pair.
    ///
    /// # Panics
    /// Panics if the instance was created without `VK_KHR_xcb_surface`.
    fn create_surface_from_xcb(
        &self,
        connection: *mut vk::xcb_connection_t,
        window: vk::xcb_window_t,
    ) -> super::Surface {
        if !self.extensions.contains(&khr::XcbSurface::name()) {
            panic!("Vulkan driver does not support VK_KHR_XCB_SURFACE");
        }
        let surface = {
            let xcb_loader = khr::XcbSurface::new(&self.entry, &self.shared.raw);
            let info = vk::XcbSurfaceCreateInfoKHR::builder()
                .flags(vk::XcbSurfaceCreateFlagsKHR::empty())
                .window(window)
                .connection(connection);
            unsafe { xcb_loader.create_xcb_surface(&info, None) }
                .expect("XcbSurface::create_xcb_surface() failed")
        };
        self.create_surface_from_vk_surface_khr(surface)
    }

    /// Creates a `Surface` from a Wayland display/surface pair.
    ///
    /// # Panics
    /// Panics if the instance was created without `VK_KHR_wayland_surface`.
    fn create_surface_from_wayland(
        &self,
        display: *mut c_void,
        surface: *mut c_void,
    ) -> super::Surface {
        if !self.extensions.contains(&khr::WaylandSurface::name()) {
            panic!("Vulkan driver does not support VK_KHR_WAYLAND_SURFACE");
        }
        let surface = {
            let w_loader = khr::WaylandSurface::new(&self.entry, &self.shared.raw);
            let info = vk::WaylandSurfaceCreateInfoKHR::builder()
                .flags(vk::WaylandSurfaceCreateFlagsKHR::empty())
                .display(display)
                .surface(surface);
            unsafe { w_loader.create_wayland_surface(&info, None) }.expect("WaylandSurface failed")
        };
        self.create_surface_from_vk_surface_khr(surface)
    }

    /// Creates a `Surface` from an `ANativeWindow` pointer.
    ///
    /// # Panics
    /// Panics if the instance was created without `VK_KHR_android_surface`.
    fn create_surface_android(&self, window: *const c_void) -> super::Surface {
        // Guard on the extension being enabled, for consistency with the
        // other platform-specific constructors in this impl.
        if !self.extensions.contains(&khr::AndroidSurface::name()) {
            panic!("Vulkan driver does not support VK_KHR_ANDROID_SURFACE");
        }
        let surface = {
            let a_loader = khr::AndroidSurface::new(&self.entry, &self.shared.raw);
            let info = vk::AndroidSurfaceCreateInfoKHR::builder()
                .flags(vk::AndroidSurfaceCreateFlagsKHR::empty())
                .window(window as *mut _);
            unsafe { a_loader.create_android_surface(&info, None) }.expect("AndroidSurface failed")
        };
        self.create_surface_from_vk_surface_khr(surface)
    }

    /// Creates a `Surface` from a Win32 `HINSTANCE`/`HWND` pair.
    ///
    /// # Panics
    /// Panics if the instance was created without `VK_KHR_win32_surface`.
    fn create_surface_from_hwnd(
        &self,
        hinstance: *mut c_void,
        hwnd: *mut c_void,
    ) -> super::Surface {
        if !self.extensions.contains(&khr::Win32Surface::name()) {
            panic!("Vulkan driver does not support VK_KHR_WIN32_SURFACE");
        }
        let surface = {
            let info = vk::Win32SurfaceCreateInfoKHR::builder()
                .flags(vk::Win32SurfaceCreateFlagsKHR::empty())
                .hinstance(hinstance)
                .hwnd(hwnd);
            let win32_loader = khr::Win32Surface::new(&self.entry, &self.shared.raw);
            unsafe {
                win32_loader
                    .create_win32_surface(&info, None)
                    .expect("Unable to create Win32 surface")
            }
        };
        self.create_surface_from_vk_surface_khr(surface)
    }

    /// Creates a `Surface` from an `NSView`, installing a `CAMetalLayer` on
    /// it first when necessary. Currently compiled out (`feature = "disabled"`).
    #[cfg(feature = "disabled")]
    fn create_surface_from_ns_view(&self, view: *mut c_void) -> super::Surface {
        use ash::extensions::mvk;
        use core_graphics_types::{base::CGFloat, geometry::CGRect};
        use objc::runtime::{Object, BOOL, YES};

        // TODO: this logic is duplicated from gfx-backend-metal, refactor?
        unsafe {
            let view = view as *mut Object;
            let existing: *mut Object = msg_send![view, layer];
            let class = class!(CAMetalLayer);

            // Reuse the view's layer only if it already is a CAMetalLayer.
            let use_current = if existing.is_null() {
                false
            } else {
                let result: BOOL = msg_send![existing, isKindOfClass: class];
                result == YES
            };

            if !use_current {
                let layer: *mut Object = msg_send![class, new];
                let () = msg_send![view, setLayer: layer];
                let bounds: CGRect = msg_send![view, bounds];
                let () = msg_send![layer, setBounds: bounds];

                let window: *mut Object = msg_send![view, window];
                if !window.is_null() {
                    let scale_factor: CGFloat = msg_send![window, backingScaleFactor];
                    let () = msg_send![layer, setContentsScale: scale_factor];
                }
            }
        }

        if !self.extensions.contains(&mvk::MacOSSurface::name()) {
            panic!("Vulkan driver does not support VK_MVK_MACOS_SURFACE");
        }

        let surface = {
            let mac_os_loader = mvk::MacOSSurface::new(&self.entry, &self.shared.raw);
            let mut info = vk::MacOSSurfaceCreateInfoMVK::builder()
                .flags(vk::MacOSSurfaceCreateFlagsMVK::empty());
            if let Some(view) = unsafe { view.as_ref() } {
                info = info.view(view);
            }
            unsafe {
                mac_os_loader
                    .create_mac_os_surface_mvk(&info, None)
                    .expect("Unable to create macOS surface")
            }
        };

        self.create_surface_from_vk_surface_khr(surface)
    }

    /// Wraps a raw `vk::SurfaceKHR` into our `Surface`, loading the
    /// `VK_KHR_surface` function table and pinning the instance alive.
    fn create_surface_from_vk_surface_khr(&self, surface: vk::SurfaceKHR) -> super::Surface {
        let functor = khr::Surface::new(&self.entry, &self.shared.raw);
        super::Surface {
            raw: surface,
            functor,
            instance: Arc::clone(&self.shared),
            swapchain: None,
        }
    }
}
impl crate::Instance<super::Api> for super::Instance {
unsafe fn init(desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
let entry = match unsafe { ash::Entry::new() } {
Ok(entry) => entry,
Err(err) => {
log::info!("Missing Vulkan entry points: {:?}", err);
return Err(crate::InstanceError);
}
};
let driver_api_version = match entry.try_enumerate_instance_version() {
// Vulkan 1.1+
Ok(Some(version)) => version.into(),
Ok(None) => vk::API_VERSION_1_0,
Err(err) => {
log::warn!("try_enumerate_instance_version: {:?}", err);
return Err(crate::InstanceError);
}
};
let app_name = CString::new(desc.name).unwrap();
let app_info = vk::ApplicationInfo::builder()
.application_name(app_name.as_c_str())
.application_version(1)
.engine_name(CStr::from_bytes_with_nul(b"wgpu-hal\0").unwrap())
.engine_version(2)
.api_version({
// Pick the latest API version available, but don't go later than the SDK version used by `gfx_backend_vulkan`.
cmp::min(driver_api_version, {
// This is the max Vulkan API version supported by `wgpu-hal`.
//
// If we want to increment this, there are some things that must be done first:
// - Audit the behavioral differences between the previous and new API versions.
// - Audit all extensions used by this backend:
// - If any were promoted in the new API version and the behavior has changed, we must handle the new behavior in addition to the old behavior.
// - If any were obsoleted in the new API version, we must implement a fallback for the new API version
// - If any are non-KHR-vendored, we must ensure the new behavior is still correct (since backwards-compatibility is not guaranteed).
vk::HEADER_VERSION_COMPLETE
})
.into()
});
let instance_extensions = entry
.enumerate_instance_extension_properties()
.map_err(|e| {
log::info!("enumerate_instance_extension_properties: {:?}", e);
crate::InstanceError
})?;
let instance_layers = entry.enumerate_instance_layer_properties().map_err(|e| {
log::info!("enumerate_instance_layer_properties: {:?}", e);
crate::InstanceError
})?;
// Check our extensions against the available extensions
let extensions = {
let mut extensions: Vec<&'static CStr> = Vec::new();
extensions.push(khr::Surface::name());
// Platform-specific WSI extensions
if cfg!(all(
unix,
not(target_os = "android"),
not(target_os = "macos")
)) {
extensions.push(khr::XlibSurface::name());
extensions.push(khr::XcbSurface::name());
extensions.push(khr::WaylandSurface::name());
}
if cfg!(target_os = "android") {
extensions.push(khr::AndroidSurface::name());
}
if cfg!(target_os = "windows") {
extensions.push(khr::Win32Surface::name());
}
if cfg!(target_os = "macos") {
extensions.push(ash::extensions::mvk::MacOSSurface::name());
}
extensions.push(ext::DebugUtils::name());
extensions.push(vk::KhrGetPhysicalDeviceProperties2Fn::name());
// VK_KHR_storage_buffer_storage_class required for `Naga` on Vulkan 1.0 devices
if driver_api_version == vk::API_VERSION_1_0 {
extensions.push(vk::KhrStorageBufferStorageClassFn::name());
}
// Only keep available extensions.
extensions.retain(|&ext| {
if instance_extensions
.iter()
.find(|inst_ext| unsafe {
CStr::from_ptr(inst_ext.extension_name.as_ptr()) == ext
})
.is_some()
{
true
} else {
log::info!("Unable to find extension: {}", ext.to_string_lossy());
false
}
});
extensions
};
if driver_api_version == vk::API_VERSION_1_0
&& !extensions.contains(&vk::KhrStorageBufferStorageClassFn::name())
{
log::warn!("Required VK_KHR_storage_buffer_storage_class extension is not supported");
return Err(crate::InstanceError);
}
// Check requested layers against the available layers
let layers = {
let mut layers: Vec<&'static CStr> = Vec::new();
if desc.flags.contains(crate::InstanceFlag::VALIDATION) {
layers.push(CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap());
}
// Only keep available layers.
layers.retain(|&layer| {
if instance_layers
.iter()
.find(|inst_layer| unsafe {
CStr::from_ptr(inst_layer.layer_name.as_ptr()) == layer
})
.is_some()
{
true
} else {
log::warn!("Unable to find layer: {}", layer.to_string_lossy());
false
}
});
layers
};
let instance = {
let str_pointers = layers
.iter()
.chain(extensions.iter())
.map(|&s| {
// Safe because `layers` and `extensions` entries have static lifetime.
s.as_ptr()
})
.collect::<Vec<_>>();
let create_info = vk::InstanceCreateInfo::builder()
.flags(vk::InstanceCreateFlags::empty())
.application_info(&app_info)
.enabled_layer_names(&str_pointers[..layers.len()])
.enabled_extension_names(&str_pointers[layers.len()..]);
entry.create_instance(&create_info, None).map_err(|e| {
log::warn!("create_instance: {:?}", e);
crate::InstanceError
})?
};
let get_physical_device_properties = extensions
.iter()
.find(|&&ext| ext == vk::KhrGetPhysicalDeviceProperties2Fn::name())
.map(|_| {
vk::KhrGetPhysicalDeviceProperties2Fn::load(|name| {
mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
})
});
Ok(Self {
shared: Arc::new(super::InstanceShared {
raw: instance,
flags: desc.flags,
get_physical_device_properties,
}),
extensions,
entry,
})
}
unsafe fn create_surface(
&self,
has_handle: &impl raw_window_handle::HasRawWindowHandle,
) -> Result<super::Surface, crate::InstanceError> {
use raw_window_handle::RawWindowHandle;
match has_handle.raw_window_handle() {
#[cfg(all(
unix,
not(target_os = "android"),
not(target_os = "macos"),
not(target_os = "solaris")
))]
RawWindowHandle::Wayland(handle)
if self.extensions.contains(&khr::WaylandSurface::name()) =>
{
Ok(self.create_surface_from_wayland(handle.display, handle.surface))
}
#[cfg(all(
unix,
not(target_os = "android"),
not(target_os = "macos"),
not(target_os = "solaris")
))]
RawWindowHandle::Xlib(handle)
if self.extensions.contains(&khr::XlibSurface::name()) =>
{
Ok(self.create_surface_from_xlib(handle.display as *mut _, handle.window))
}
#[cfg(all(
unix,
not(target_os = "android"),
not(target_os = "macos"),
not(target_os = "ios")
))]
RawWindowHandle::Xcb(handle) if self.extensions.contains(&khr::XcbSurface::name()) => {
Ok(self.create_surface_from_xcb(handle.connection as *mut _, handle.window))
}
#[cfg(target_os = "android")]
RawWindowHandle::Android(handle) => {
Ok(self.create_surface_android(handle.a_native_window))
}
#[cfg(windows)]
RawWindowHandle::Windows(handle) => {
use winapi::um::libloaderapi::GetModuleHandleW;
let hinstance = GetModuleHandleW(std::ptr::null());
Ok(self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd))
}
#[cfg(target_os = "macos_disabled")]
RawWindowHandle::MacOS(handle) => Ok(self.create_surface_from_ns_view(handle.ns_view)),
_ => Err(crate::InstanceError),
}
}
unsafe fn destroy_surface(&self, surface: super::Surface) {
surface.functor.destroy_surface(surface.raw, None);
}
unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<super::Api>> {
let raw_devices = match unsafe { self.shared.raw.enumerate_physical_devices() } {
Ok(devices) => devices,
Err(err) => {
log::error!("enumerate_adapters: {}", err);
Vec::new()
}
};
raw_devices
.into_iter()
.flat_map(|device| self.expose_adapter(device))
.collect()
}
}
impl crate::Surface<super::Api> for super::Surface {
    unsafe fn configure(
        &mut self,
        device: &super::Device,
        config: &crate::SurfaceConfiguration,
    ) -> Result<(), crate::SurfaceError> {
        // Retire the previous swapchain (if any): release its GPU-side
        // resources, then pass it along so the driver can recycle it as the
        // `old_swapchain`. (The previously-bound `usage`/`format` locals were
        // unused and have been removed.)
        let old = self
            .swapchain
            .take()
            .map(|sc| sc.release_resources(&device.shared.raw));

        let swapchain = device.create_swapchain(self, config, old)?;
        self.swapchain = Some(swapchain);

        Ok(())
    }

    unsafe fn unconfigure(&mut self, device: &super::Device) {
        if let Some(sc) = self.swapchain.take() {
            let swapchain = sc.release_resources(&device.shared.raw);
            swapchain.functor.destroy_swapchain(swapchain.raw, None);
        }
    }

    /// Acquires the next swapchain image, waiting up to `timeout_ms`.
    ///
    /// Returns `Ok(None)` on timeout, `Outdated`/`Lost` when the swapchain
    /// must be re-created, and the acquired texture otherwise.
    unsafe fn acquire_texture(
        &mut self,
        timeout_ms: u32,
    ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
        let sc = self.swapchain.as_mut().unwrap();
        let timeout_ns = timeout_ms as u64 * super::MILLIS_TO_NANOS;

        // will block if no image is available
        let (index, suboptimal) =
            match sc
                .functor
                .acquire_next_image(sc.raw, timeout_ns, vk::Semaphore::null(), sc.fence)
            {
                Ok(pair) => pair,
                Err(error) => {
                    return match error {
                        vk::Result::TIMEOUT => Ok(None),
                        vk::Result::NOT_READY | vk::Result::ERROR_OUT_OF_DATE_KHR => {
                            Err(crate::SurfaceError::Outdated)
                        }
                        vk::Result::ERROR_SURFACE_LOST_KHR => Err(crate::SurfaceError::Lost),
                        other => Err(crate::DeviceError::from(other).into()),
                    }
                }
            };

        // special case for Intel Vulkan returning bizarre values (ugh)
        if sc.device.vendor_id == crate::aux::db::intel::VENDOR && index > 0x100 {
            return Err(crate::SurfaceError::Outdated);
        }

        // Wait for the acquisition fence so the image is actually ready for
        // use, then reset it for the next acquire.
        let fences = &[sc.fence];

        sc.device
            .raw
            .wait_for_fences(fences, true, !0)
            .map_err(crate::DeviceError::from)?;
        sc.device
            .raw
            .reset_fences(fences)
            .map_err(crate::DeviceError::from)?;

        let texture = super::SurfaceTexture {
            index,
            texture: super::Texture {
                raw: sc.images[index as usize],
                ty: vk::ImageType::TYPE_2D,
                flags: vk::ImageCreateFlags::empty(),
                extent: sc.extent,
            },
        };
        Ok(Some(crate::AcquiredSurfaceTexture {
            texture,
            suboptimal,
        }))
    }

    unsafe fn discard_texture(&mut self, _texture: super::SurfaceTexture) {}
}

464
wgpu-hal/src/vulkan/mod.rs Normal file
View File

@ -0,0 +1,464 @@
#![allow(unused_variables)]
mod adapter;
mod conv;
mod instance;
use ash::{extensions::khr, vk};
use std::{borrow::Borrow, ffi::CStr, ops::Range, sync::Arc};
/// Nanoseconds per millisecond, for converting user-facing timeouts to Vulkan's nanosecond units.
const MILLIS_TO_NANOS: u64 = 1_000_000;
/// Marker type selecting the Vulkan backend in the generic HAL interface.
#[derive(Clone)]
pub struct Api;
/// Command recording object; currently an empty stub.
pub struct Encoder;
/// Placeholder for HAL resource types that are not implemented yet.
#[derive(Debug)]
pub struct Resource;
/// Shorthand for the result of device-level operations.
type DeviceResult<T> = Result<T, crate::DeviceError>;
impl crate::Api for Api {
    // Binds the generic HAL interface to this backend's concrete types.
    // `Resource` and `Encoder` stand in for the parts not implemented yet.
    type Instance = Instance;
    type Surface = Surface;
    type Adapter = Adapter;
    type Queue = Queue;
    type Device = Device;

    type CommandBuffer = Encoder;

    type Buffer = Resource;
    type Texture = Texture;
    type SurfaceTexture = SurfaceTexture;
    type TextureView = Resource;
    type Sampler = Resource;
    type QuerySet = Resource;
    type Fence = Resource;

    type BindGroupLayout = Resource;
    type BindGroup = Resource;
    type PipelineLayout = Resource;
    type ShaderModule = Resource;
    type RenderPipeline = Resource;
    type ComputePipeline = Resource;
}
/// Loaded RenderDoc in-application API entry points (currently unused; see
/// the commented-out `render_doc_entry` field on `InstanceShared`).
struct RenderDocEntry {
    // Function-pointer table for the RenderDoc 1.4.1 in-application API.
    api: renderdoc_sys::RENDERDOC_API_1_4_1,
    // Keeps the shared library loaded as long as the pointers above are alive.
    lib: libloading::Library,
}
// SAFETY: assumed sound because the struct only holds a function-pointer
// table and the owning library handle — TODO(review): confirm RenderDoc's
// documented thread-safety guarantees before relying on this.
unsafe impl Send for RenderDocEntry {}
unsafe impl Sync for RenderDocEntry {}
/// Instance state shared (via `Arc`) with surfaces, adapters, and devices.
struct InstanceShared {
    raw: ash::Instance,
    flags: crate::InstanceFlag,
    // Loaded `VK_KHR_get_physical_device_properties2` function pointers,
    // present only when the extension was enabled at instance creation.
    get_physical_device_properties: Option<vk::KhrGetPhysicalDeviceProperties2Fn>,
    //TODO
    //debug_messenger: Option<DebugMessenger>,
    //render_doc_entry: Result<RenderDocEntry, String>,
}
/// The Vulkan instance: loader entry points plus the enabled extensions.
pub struct Instance {
    shared: Arc<InstanceShared>,
    // Names of the instance extensions that were actually enabled.
    extensions: Vec<&'static CStr>,
    entry: ash::Entry,
}
/// A configured swapchain together with everything needed to operate it.
struct Swapchain {
    raw: vk::SwapchainKHR,
    // Loaded `VK_KHR_swapchain` function pointers.
    functor: khr::Swapchain,
    extent: vk::Extent3D,
    // Keeps the owning device alive; used for teardown in `release_resources`.
    device: Arc<DeviceShared>,
    // Signaled by `acquire_next_image` and waited on in `acquire_texture`.
    fence: vk::Fence,
    //semaphore: vk::Semaphore,
    images: Vec<vk::Image>,
}
/// A presentable surface and its current swapchain, if configured.
pub struct Surface {
    raw: vk::SurfaceKHR,
    // Loaded `VK_KHR_surface` function pointers.
    functor: khr::Surface,
    // Keeps the instance alive for as long as the surface exists.
    instance: Arc<InstanceShared>,
    swapchain: Option<Swapchain>,
}
/// A swapchain image acquired for rendering/presentation.
#[derive(Debug)]
pub struct SurfaceTexture {
    // Index of the image within the swapchain's `images` list.
    index: u32,
    texture: Texture,
}
impl Borrow<Texture> for SurfaceTexture {
    // Lets a surface texture be used wherever a plain texture is expected.
    fn borrow(&self) -> &Texture {
        &self.texture
    }
}
/// A Vulkan physical device plus the capabilities queried from it.
pub struct Adapter {
    raw: vk::PhysicalDevice,
    // Keeps the instance alive for the lifetime of the adapter.
    instance: Arc<InstanceShared>,
    queue_families: Vec<vk::QueueFamilyProperties>,
    known_memory_flags: vk::MemoryPropertyFlags,
    phd_capabilities: adapter::PhysicalDeviceCapabilities,
    phd_features: adapter::PhysicalDeviceFeatures,
    available_features: wgt::Features,
    downlevel_flags: wgt::DownlevelFlags,
}
// TODO there's no reason why this can't be unified--the function pointers should all be the same--it's not clear how to do this with `ash`.
/// A device extension's function table, or a marker that the extension was
/// promoted to core and the regular `ash` device traits should be used.
enum ExtensionFn<T> {
    /// The loaded function pointer struct for an extension.
    Extension(T),
    /// The extension was promoted to a core version of Vulkan and the functions on `ash`'s `DeviceV1_x` traits should be used.
    Promoted,
}

impl<T> ExtensionFn<T> {
    /// Expect `self` to be `Self::Extension` and return the inner value.
    ///
    /// # Panics
    /// Panics if the extension was promoted to core (`Self::Promoted`).
    fn unwrap_extension(&self) -> &T {
        match *self {
            Self::Extension(ref t) => t,
            // Give the panic a message so a crash here is diagnosable.
            Self::Promoted => panic!("extension was promoted to a core Vulkan version"),
        }
    }
}
/// Function tables for optional device extensions, loaded at device creation.
struct DeviceExtensionFunctions {
    draw_indirect_count: Option<ExtensionFn<khr::DrawIndirectCount>>,
}
/// Device state shared (via `Arc`) between the device, queue, and swapchains.
struct DeviceShared {
    raw: ash::Device,
    // Keeps the instance alive as long as the device exists.
    instance: Arc<InstanceShared>,
    extension_fns: DeviceExtensionFunctions,
    features: wgt::Features,
    // PCI vendor id of the physical device (see the Intel workaround in
    // `acquire_texture`).
    vendor_id: u32,
    /// The `hal::Features::NDC_Y_UP` flag is implemented with either `VK_AMD_negative_viewport_height` or `VK_KHR_maintenance1`/1.1+. The AMD extension for negative viewport height does not require a Y shift.
    ///
    /// This flag is `true` if the device has `VK_KHR_maintenance1`/1.1+ and `false` otherwise (i.e. in the case of `VK_AMD_negative_viewport_height`).
    flip_y_requires_shift: bool,
    imageless_framebuffers: bool,
    image_view_usage: bool,
    timestamp_period: f32,
}
/// An open Vulkan device plus per-device codegen/allocation settings.
pub struct Device {
    shared: Arc<DeviceShared>,
    // Bitmask of memory type indices usable by this device.
    valid_ash_memory_types: u32,
    naga_options: naga::back::spv::Options,
}
/// A device queue together with the swapchain functions used for presenting.
pub struct Queue {
    raw: vk::Queue,
    swapchain_fn: khr::Swapchain,
    //device: Arc<DeviceShared>,
}
/// A raw Vulkan image with the creation parameters needed to interpret it.
#[derive(Debug)]
pub struct Texture {
    raw: vk::Image,
    ty: vk::ImageType,
    flags: vk::ImageCreateFlags,
    extent: vk::Extent3D,
}
impl crate::Queue<Api> for Queue {
    // Stub: submission is not implemented yet and pretends to succeed.
    unsafe fn submit<I>(
        &mut self,
        command_buffers: I,
        signal_fence: Option<(&mut Resource, crate::FenceValue)>,
    ) -> DeviceResult<()> {
        Ok(())
    }
    // Stub: presentation is not implemented yet and pretends to succeed.
    unsafe fn present(
        &mut self,
        surface: &mut Surface,
        texture: SurfaceTexture,
    ) -> Result<(), crate::SurfaceError> {
        Ok(())
    }
}
// NOTE(review): this impl targets `Context`, but no `Context` type is defined
// anywhere in this module — presumably a leftover from the stub-backend
// template; verify whether the target should be `Device`.
// All methods below are stubs that pretend to succeed without doing any work.
impl crate::Device<Api> for Context {
    unsafe fn create_buffer(&self, desc: &crate::BufferDescriptor) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_buffer(&self, buffer: Resource) {}
    // Stub: mapping is unimplemented, so the device is reported as lost.
    unsafe fn map_buffer(
        &self,
        buffer: &Resource,
        range: crate::MemoryRange,
    ) -> DeviceResult<crate::BufferMapping> {
        Err(crate::DeviceError::Lost)
    }
    unsafe fn unmap_buffer(&self, buffer: &Resource) -> DeviceResult<()> {
        Ok(())
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}
    unsafe fn invalidate_mapped_ranges<I>(&self, buffer: &Resource, ranges: I) {}
    // Stub: the only one that panics instead of silently succeeding.
    unsafe fn create_texture(&self, desc: &crate::TextureDescriptor) -> DeviceResult<Texture> {
        unimplemented!()
    }
    unsafe fn destroy_texture(&self, texture: Texture) {}
    unsafe fn create_texture_view(
        &self,
        texture: &Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_texture_view(&self, view: Resource) {}
    unsafe fn create_sampler(&self, desc: &crate::SamplerDescriptor) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_sampler(&self, sampler: Resource) {}
    unsafe fn create_command_buffer(
        &self,
        desc: &crate::CommandBufferDescriptor,
    ) -> DeviceResult<Encoder> {
        Ok(Encoder)
    }
    unsafe fn destroy_command_buffer(&self, cmd_buf: Encoder) {}
    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_bind_group_layout(&self, bg_layout: Resource) {}
    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<Api>,
    ) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_pipeline_layout(&self, pipeline_layout: Resource) {}
    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<Api>,
    ) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_bind_group(&self, group: Resource) {}
    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::NagaShader,
    ) -> Result<Resource, crate::ShaderError> {
        Ok(Resource)
    }
    unsafe fn destroy_shader_module(&self, module: Resource) {}
    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<Api>,
    ) -> Result<Resource, crate::PipelineError> {
        Ok(Resource)
    }
    unsafe fn destroy_render_pipeline(&self, pipeline: Resource) {}
    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<Api>,
    ) -> Result<Resource, crate::PipelineError> {
        Ok(Resource)
    }
    unsafe fn destroy_compute_pipeline(&self, pipeline: Resource) {}
    unsafe fn create_query_set(&self, desc: &wgt::QuerySetDescriptor) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_query_set(&self, set: Resource) {}
    unsafe fn create_fence(&self) -> DeviceResult<Resource> {
        Ok(Resource)
    }
    unsafe fn destroy_fence(&self, fence: Resource) {}
    unsafe fn get_fence_value(&self, fence: &Resource) -> DeviceResult<crate::FenceValue> {
        Ok(0)
    }
    unsafe fn wait(
        &self,
        fence: &Resource,
        value: crate::FenceValue,
        timeout_ms: u32,
    ) -> DeviceResult<bool> {
        Ok(true)
    }
    // Stub: capture tooling is not hooked up; reports capture as unavailable.
    unsafe fn start_capture(&self) -> bool {
        false
    }
    unsafe fn stop_capture(&self) {}
}
// Stub implementation: every command-recording method is a no-op until
// Vulkan command buffer encoding is implemented for this backend.
impl crate::CommandBuffer<Api> for Encoder {
    unsafe fn finish(&mut self) {}

    unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
    where
        T: Iterator<Item = crate::BufferBarrier<'a, Api>>,
    {
    }

    unsafe fn transition_textures<'a, T>(&mut self, barriers: T)
    where
        T: Iterator<Item = crate::TextureBarrier<'a, Api>>,
    {
    }

    unsafe fn fill_buffer(&mut self, buffer: &Resource, range: crate::MemoryRange, value: u8) {}

    unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &Resource, dst: &Resource, regions: T) {}

    unsafe fn copy_texture_to_texture<T>(
        &mut self,
        src: &Texture,
        src_usage: crate::TextureUse,
        dst: &Texture,
        regions: T,
    ) {
    }

    unsafe fn copy_buffer_to_texture<T>(&mut self, src: &Resource, dst: &Texture, regions: T) {}

    unsafe fn copy_texture_to_buffer<T>(
        &mut self,
        src: &Texture,
        src_usage: crate::TextureUse,
        dst: &Resource,
        regions: T,
    ) {
    }

    unsafe fn begin_query(&mut self, set: &Resource, index: u32) {}
    unsafe fn end_query(&mut self, set: &Resource, index: u32) {}
    unsafe fn write_timestamp(&mut self, set: &Resource, index: u32) {}
    unsafe fn reset_queries(&mut self, set: &Resource, range: Range<u32>) {}
    unsafe fn copy_query_results(
        &mut self,
        set: &Resource,
        range: Range<u32>,
        buffer: &Resource,
        offset: wgt::BufferAddress,
    ) {
    }

    // render

    unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<Api>) {}
    unsafe fn end_render_pass(&mut self) {}

    unsafe fn set_bind_group(
        &mut self,
        layout: &Resource,
        index: u32,
        group: &Resource,
        dynamic_offsets: &[wgt::DynamicOffset],
    ) {
    }
    unsafe fn set_push_constants(
        &mut self,
        layout: &Resource,
        stages: wgt::ShaderStage,
        offset: u32,
        data: &[u32],
    ) {
    }

    unsafe fn insert_debug_marker(&mut self, label: &str) {}
    unsafe fn begin_debug_marker(&mut self, group_label: &str) {}
    unsafe fn end_debug_marker(&mut self) {}

    unsafe fn set_render_pipeline(&mut self, pipeline: &Resource) {}

    unsafe fn set_index_buffer<'a>(
        &mut self,
        binding: crate::BufferBinding<'a, Api>,
        format: wgt::IndexFormat,
    ) {
    }
    unsafe fn set_vertex_buffer<'a>(&mut self, index: u32, binding: crate::BufferBinding<'a, Api>) {
    }
    unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {}
    unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {}
    unsafe fn set_stencil_reference(&mut self, value: u32) {}
    unsafe fn set_blend_constants(&mut self, color: &wgt::Color) {}

    unsafe fn draw(
        &mut self,
        start_vertex: u32,
        vertex_count: u32,
        start_instance: u32,
        instance_count: u32,
    ) {
    }
    unsafe fn draw_indexed(
        &mut self,
        start_index: u32,
        index_count: u32,
        base_vertex: i32,
        start_instance: u32,
        instance_count: u32,
    ) {
    }
    unsafe fn draw_indirect(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        draw_count: u32,
    ) {
    }
    unsafe fn draw_indexed_indirect(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        draw_count: u32,
    ) {
    }
    unsafe fn draw_indirect_count(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        count_buffer: &Resource,
        count_offset: wgt::BufferAddress,
        max_count: u32,
    ) {
    }
    unsafe fn draw_indexed_indirect_count(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        count_buffer: &Resource,
        count_offset: wgt::BufferAddress,
        max_count: u32,
    ) {
    }

    // compute

    unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) {}
    unsafe fn end_compute_pass(&mut self) {}

    unsafe fn set_compute_pipeline(&mut self, pipeline: &Resource) {}

    unsafe fn dispatch(&mut self, count: [u32; 3]) {}
    unsafe fn dispatch_indirect(&mut self, buffer: &Resource, offset: wgt::BufferAddress) {}
}
impl From<vk::Result> for crate::DeviceError {
    /// Maps a raw Vulkan result code onto the HAL's device error categories.
    /// Unrecognized codes are logged and conservatively treated as a lost
    /// device.
    fn from(result: vk::Result) -> Self {
        if result == vk::Result::ERROR_OUT_OF_HOST_MEMORY
            || result == vk::Result::ERROR_OUT_OF_DEVICE_MEMORY
        {
            Self::OutOfMemory
        } else if result == vk::Result::ERROR_DEVICE_LOST {
            Self::Lost
        } else {
            log::warn!("Unrecognized device error {:?}", result);
            Self::Lost
        }
    }
}

View File

@ -152,6 +152,8 @@ impl<S> Default for RequestAdapterOptions<S> {
}
}
//TODO: make robust resource access configurable
bitflags::bitflags! {
/// Features that are not guaranteed to be supported.
///