hal/mtl: creation of bind groups

This commit is contained in:
Dzmitry Malyshau 2021-06-09 17:06:43 -04:00
parent f46459c589
commit 9f904700ee
11 changed files with 715 additions and 191 deletions

3
Cargo.lock generated
View File

@ -1914,7 +1914,10 @@ dependencies = [
name = "wgpu-hal"
version = "0.1.0"
dependencies = [
"arrayvec",
"bitflags",
"foreign-types",
"fxhash",
"log",
"metal",
"naga",

View File

@ -744,6 +744,8 @@ impl<A: HalApi> Device<A> {
None
};
//TODO: check for wgt::DownlevelFlags::COMPARISON_SAMPLERS
let hal_desc = hal::SamplerDescriptor {
label: desc.label.borrow_option(),
address_modes: desc.address_modes,

View File

@ -13,10 +13,12 @@ license = "MIT OR Apache-2.0"
[features]
default = ["metal"]
metal = ["mtl", "objc", "parking_lot"]
metal = ["foreign-types", "mtl", "objc", "parking_lot", "naga/msl-out"]
[dependencies]
arrayvec = "0.5"
bitflags = "1.0"
fxhash = "0.2"
log = "0.4"
parking_lot = { version = "0.11", optional = true }
raw-window-handle = "0.3"
@ -24,6 +26,7 @@ thiserror = "1"
wgt = { package = "wgpu-types", path = "../wgpu-types" }
# backends
foreign-types = { version = "0.3", optional = true }
mtl = { package = "metal", version = "0.22", optional = true }
objc = { version = "0.2.5", optional = true }

7
wgpu-hal/src/aux.rs Normal file
View File

@ -0,0 +1,7 @@
pub fn map_naga_stage(stage: naga::ShaderStage) -> wgt::ShaderStage {
match stage {
naga::ShaderStage::Vertex => wgt::ShaderStage::VERTEX,
naga::ShaderStage::Fragment => wgt::ShaderStage::FRAGMENT,
naga::ShaderStage::Compute => wgt::ShaderStage::COMPUTE,
}
}

View File

@ -36,6 +36,7 @@
clippy::pattern_type_mismatch,
)]
pub mod aux;
pub mod empty;
#[cfg(feature = "metal")]
mod metal;

View File

@ -581,6 +581,7 @@ impl super::PrivateCapabilities {
),
sampler_clamp_to_border: Self::supports_any(&device, SAMPLER_CLAMP_TO_BORDER_SUPPORT),
sampler_lod_average: {
// TODO: Clarify minimum macOS version with Apple (43707452)
let need_version = if os_is_mac { (10, 13) } else { (9, 0) };
Self::version_at_least(major, minor, need_version.0, need_version.1)
},
@ -840,6 +841,11 @@ impl super::PrivateCapabilities {
wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES,
self.texture_cube_array,
);
//TODO: separate the mutable comparisons from immutable ones
downlevel.flags.set(
wgt::DownlevelFlags::COMPARISON_SAMPLERS,
self.mutable_comparison_samplers,
);
crate::Capabilities {
limits: wgt::Limits {

View File

@ -1 +1,163 @@
use super::{Api, Resource};
use std::ops::Range;
/// Command recording for the Metal backend.
///
/// Every method below is currently an empty stub: nothing is recorded and
/// no Metal API is called yet.
impl crate::CommandBuffer<Api> for super::Encoder {
    unsafe fn finish(&mut self) {}

    // Barrier stubs: no-ops for now.
    unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
    where
        T: Iterator<Item = crate::BufferBarrier<'a, Api>>,
    {
    }
    unsafe fn transition_textures<'a, T>(&mut self, barriers: T)
    where
        T: Iterator<Item = crate::TextureBarrier<'a, Api>>,
    {
    }

    // Transfer stubs.
    unsafe fn fill_buffer(&mut self, buffer: &Resource, range: crate::MemoryRange, value: u8) {}
    unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &Resource, dst: &Resource, regions: T)
    where
        T: Iterator<Item = crate::BufferCopy>,
    {
    }
    /// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
    unsafe fn copy_texture_to_texture<T>(
        &mut self,
        src: &Resource,
        src_usage: crate::TextureUse,
        dst: &Resource,
        regions: T,
    ) {
    }
    /// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
    unsafe fn copy_buffer_to_texture<T>(&mut self, src: &Resource, dst: &Resource, regions: T) {}
    unsafe fn copy_texture_to_buffer<T>(
        &mut self,
        src: &Resource,
        src_usage: crate::TextureUse,
        dst: &Resource,
        regions: T,
    ) {
    }

    // Query stubs.
    unsafe fn begin_query(&mut self, set: &Resource, index: u32) {}
    unsafe fn end_query(&mut self, set: &Resource, index: u32) {}
    unsafe fn write_timestamp(&mut self, set: &Resource, index: u32) {}
    unsafe fn reset_queries(&mut self, set: &Resource, range: Range<u32>) {}
    unsafe fn copy_query_results(
        &mut self,
        set: &Resource,
        range: Range<u32>,
        buffer: &Resource,
        offset: wgt::BufferAddress,
    ) {
    }

    // render
    unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<Api>) {}
    unsafe fn end_render_pass(&mut self) {}
    unsafe fn set_bind_group(
        &mut self,
        layout: &Resource,
        index: u32,
        group: &Resource,
        dynamic_offsets: &[wgt::DynamicOffset],
    ) {
    }
    unsafe fn set_push_constants(
        &mut self,
        layout: &Resource,
        stages: wgt::ShaderStage,
        offset: u32,
        data: &[u32],
    ) {
    }
    unsafe fn insert_debug_marker(&mut self, label: &str) {}
    unsafe fn begin_debug_marker(&mut self, group_label: &str) {}
    unsafe fn end_debug_marker(&mut self) {}
    unsafe fn set_render_pipeline(&mut self, pipeline: &Resource) {}
    unsafe fn set_index_buffer<'a>(
        &mut self,
        binding: crate::BufferBinding<'a, Api>,
        format: wgt::IndexFormat,
    ) {
    }
    unsafe fn set_vertex_buffer<'a>(&mut self, index: u32, binding: crate::BufferBinding<'a, Api>) {
    }
    unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {}
    unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {}
    unsafe fn set_stencil_reference(&mut self, value: u32) {}
    unsafe fn set_blend_constants(&mut self, color: &wgt::Color) {}
    unsafe fn draw(
        &mut self,
        start_vertex: u32,
        vertex_count: u32,
        start_instance: u32,
        instance_count: u32,
    ) {
    }
    unsafe fn draw_indexed(
        &mut self,
        start_index: u32,
        index_count: u32,
        base_vertex: i32,
        start_instance: u32,
        instance_count: u32,
    ) {
    }
    unsafe fn draw_indirect(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        draw_count: u32,
    ) {
    }
    unsafe fn draw_indexed_indirect(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        draw_count: u32,
    ) {
    }
    unsafe fn draw_indirect_count(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        count_buffer: &Resource,
        count_offset: wgt::BufferAddress,
        max_count: u32,
    ) {
    }
    unsafe fn draw_indexed_indirect_count(
        &mut self,
        buffer: &Resource,
        offset: wgt::BufferAddress,
        count_buffer: &Resource,
        count_offset: wgt::BufferAddress,
        max_count: u32,
    ) {
    }

    // compute
    unsafe fn begin_compute_pass(&mut self) {}
    unsafe fn end_compute_pass(&mut self) {}
    unsafe fn set_compute_pipeline(&mut self, pipeline: &Resource) {}
    unsafe fn dispatch(&mut self, count: [u32; 3]) {}
    unsafe fn dispatch_indirect(&mut self, buffer: &Resource, offset: wgt::BufferAddress) {}
}

View File

@ -20,13 +20,58 @@ pub fn map_texture_usage(usage: crate::TextureUse) -> mtl::MTLTextureUsage {
}
pub fn map_texture_view_dimension(dim: wgt::TextureViewDimension) -> mtl::MTLTextureType {
use mtl::MTLTextureType::*;
use wgt::TextureViewDimension as Tvd;
match dim {
Tvd::D1 => mtl::MTLTextureType::D1,
Tvd::D2 => mtl::MTLTextureType::D2,
Tvd::D2Array => mtl::MTLTextureType::D2Array,
Tvd::D3 => mtl::MTLTextureType::D3,
Tvd::Cube => mtl::MTLTextureType::Cube,
Tvd::CubeArray => mtl::MTLTextureType::CubeArray,
Tvd::D1 => D1,
Tvd::D2 => D2,
Tvd::D2Array => D2Array,
Tvd::D3 => D3,
Tvd::Cube => Cube,
Tvd::CubeArray => CubeArray,
}
}
pub fn map_compare_function(fun: wgt::CompareFunction) -> mtl::MTLCompareFunction {
use mtl::MTLCompareFunction::*;
use wgt::CompareFunction as Cf;
match fun {
Cf::Never => Never,
Cf::Less => Less,
Cf::LessEqual => LessEqual,
Cf::Equal => Equal,
Cf::GreaterEqual => GreaterEqual,
Cf::Greater => Greater,
Cf::NotEqual => NotEqual,
Cf::Always => Always,
}
}
pub fn map_filter_mode(filter: wgt::FilterMode) -> mtl::MTLSamplerMinMagFilter {
use mtl::MTLSamplerMinMagFilter::*;
match filter {
wgt::FilterMode::Nearest => Nearest,
wgt::FilterMode::Linear => Linear,
}
}
pub fn map_address_mode(address: wgt::AddressMode) -> mtl::MTLSamplerAddressMode {
use mtl::MTLSamplerAddressMode::*;
use wgt::AddressMode as Fm;
match address {
Fm::Repeat => Repeat,
Fm::MirrorRepeat => MirrorRepeat,
Fm::ClampToEdge => ClampToEdge,
Fm::ClampToBorder => ClampToBorderColor,
//Fm::MirrorClamp => MirrorClampToEdge,
}
}
pub fn map_border_color(border_color: wgt::SamplerBorderColor) -> mtl::MTLSamplerBorderColor {
use mtl::MTLSamplerBorderColor::*;
match border_color {
wgt::SamplerBorderColor::TransparentBlack => TransparentBlack,
wgt::SamplerBorderColor::OpaqueBlack => OpaqueBlack,
wgt::SamplerBorderColor::OpaqueWhite => OpaqueWhite,
}
}

View File

@ -1,5 +1,6 @@
use std::{ptr, sync::Arc};
use super::conv;
use std::ptr;
type DeviceResult<T> = Result<T, crate::DeviceError>;
@ -153,40 +154,323 @@ impl crate::Device<super::Api> for super::Device {
}
unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}
unsafe fn create_sampler(&self, desc: &crate::SamplerDescriptor) -> DeviceResult<Resource> {
Ok(Resource)
/// Build an `MTLSamplerState` from the hal sampler descriptor.
unsafe fn create_sampler(
    &self,
    desc: &crate::SamplerDescriptor,
) -> DeviceResult<super::Sampler> {
    let caps = &self.shared.private_caps;
    let descriptor = mtl::SamplerDescriptor::new();
    descriptor.set_min_filter(conv::map_filter_mode(desc.min_filter));
    descriptor.set_mag_filter(conv::map_filter_mode(desc.mag_filter));
    // NOTE(review): Nearest mip filter with no LOD clamp is treated as
    // non-mipmapped — presumably only the base level is ever sampled then;
    // confirm against sampler semantics.
    descriptor.set_mip_filter(match desc.mipmap_filter {
        wgt::FilterMode::Nearest if desc.lod_clamp.is_none() => {
            mtl::MTLSamplerMipFilter::NotMipmapped
        }
        wgt::FilterMode::Nearest => mtl::MTLSamplerMipFilter::Nearest,
        wgt::FilterMode::Linear => mtl::MTLSamplerMipFilter::Linear,
    });
    if let Some(aniso) = desc.anisotropy_clamp {
        descriptor.set_max_anisotropy(aniso.get() as _);
    }
    // Address modes arrive as a fixed [s, t, r] triple.
    let [s, t, r] = desc.address_modes;
    descriptor.set_address_mode_s(conv::map_address_mode(s));
    descriptor.set_address_mode_t(conv::map_address_mode(t));
    descriptor.set_address_mode_r(conv::map_address_mode(r));
    if let Some(ref range) = desc.lod_clamp {
        descriptor.set_lod_min_clamp(range.start);
        descriptor.set_lod_max_clamp(range.end);
    }
    // Only enabled when the device supports it (see PrivateCapabilities).
    if caps.sampler_lod_average {
        descriptor.set_lod_average(true); // optimization
    }
    if let Some(fun) = desc.compare {
        descriptor.set_compare_function(conv::map_compare_function(fun));
    }
    if let Some(border_color) = desc.border_color {
        descriptor.set_border_color(conv::map_border_color(border_color));
    }
    // Device access is behind a lock; sampler creation goes through it.
    let raw = self.shared.device.lock().new_sampler(&descriptor);
    Ok(super::Sampler { raw })
}
unsafe fn destroy_sampler(&self, sampler: Resource) {}
unsafe fn destroy_sampler(&self, _sampler: super::Sampler) {}
unsafe fn create_command_buffer(
&self,
desc: &crate::CommandBufferDescriptor,
) -> DeviceResult<Encoder> {
Ok(Encoder)
) -> DeviceResult<super::Encoder> {
Ok(super::Encoder)
}
unsafe fn destroy_command_buffer(&self, cmd_buf: Encoder) {}
unsafe fn destroy_command_buffer(&self, cmd_buf: super::Encoder) {}
unsafe fn create_bind_group_layout(
&self,
desc: &crate::BindGroupLayoutDescriptor,
) -> DeviceResult<Resource> {
Ok(Resource)
) -> DeviceResult<super::BindGroupLayout> {
let map = desc
.entries
.iter()
.cloned()
.map(|entry| (entry.binding, entry))
.collect();
Ok(super::BindGroupLayout {
entries: Arc::new(map),
})
}
unsafe fn destroy_bind_group_layout(&self, bg_layout: Resource) {}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}
unsafe fn create_pipeline_layout(
&self,
desc: &crate::PipelineLayoutDescriptor<Api>,
) -> DeviceResult<Resource> {
Ok(Resource)
desc: &crate::PipelineLayoutDescriptor<super::Api>,
) -> DeviceResult<super::PipelineLayout> {
#[derive(Debug)]
struct StageInfo {
stage: naga::ShaderStage,
counters: super::ResourceData<super::ResourceIndex>,
pc_buffer: Option<super::ResourceIndex>,
pc_limit: u32,
sizes_buffer: Option<super::ResourceIndex>,
sizes_count: u8,
}
impl StageInfo {
fn stage_bit(&self) -> wgt::ShaderStage {
crate::aux::map_naga_stage(self.stage)
}
}
let mut stage_data = super::NAGA_STAGES.map(|&stage| StageInfo {
stage,
counters: super::ResourceData::default(),
pc_buffer: None,
pc_limit: 0,
sizes_buffer: None,
sizes_count: 0,
});
let mut binding_map = std::collections::BTreeMap::default();
let mut bind_group_infos = arrayvec::ArrayVec::new();
// First, place the push constants
for info in stage_data.iter_mut() {
for pcr in desc.push_constant_ranges {
if pcr.stages.contains(info.stage_bit()) {
debug_assert_eq!(pcr.range.end % 4, 0);
info.pc_limit = (pcr.range.end / 4).max(info.pc_limit);
}
}
// round up the limits alignment to 4, so that it matches MTL compiler logic
const LIMIT_MASK: u32 = 3;
//TODO: figure out exactly what the alignment rule is and how it works. Clearly, it's not
// straightforward, given that a value of 2 stays non-aligned.
if info.pc_limit > LIMIT_MASK {
info.pc_limit = (info.pc_limit + LIMIT_MASK) & !LIMIT_MASK;
}
// handle the push constant buffer assignment and shader overrides
if info.pc_limit != 0 {
info.pc_buffer = Some(info.counters.buffers);
info.counters.buffers += 1;
}
}
// Second, place the described resources
for (group_index, &bgl) in desc.bind_group_layouts.iter().enumerate() {
// remember where the resources for this set start at each shader stage
let mut dynamic_buffers = Vec::new();
let mut sized_buffer_bindings = Vec::new();
let base_resource_indices = stage_data.map(|info| info.counters.clone());
for entry in bgl.entries.values() {
match entry.ty {
wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
min_binding_size: _,
} => {
if has_dynamic_offset {
dynamic_buffers.push(stage_data.map(|info| {
if entry.visibility.contains(info.stage_bit()) {
info.counters.buffers
} else {
!0
}
}));
}
match ty {
wgt::BufferBindingType::Storage { .. } => {
sized_buffer_bindings.push((entry.binding, entry.visibility));
for info in stage_data.iter_mut() {
if entry.visibility.contains(info.stage_bit()) {
info.sizes_count += 1;
}
}
}
}
}
_ => {}
}
for info in stage_data.iter_mut() {
if !entry.visibility.contains(info.stage_bit()) {
continue;
}
let mut target = naga::back::msl::BindTarget::default();
match entry.ty {
wgt::BindingType::Buffer { ty, .. } => {
target.buffer = Some(info.counters.buffers as _);
info.counters.buffers += 1;
if let wgt::BufferBindingType::Storage { read_only } = ty {
target.mutable = !read_only;
}
}
wgt::BindingType::Sampler { .. } => {
target.sampler = Some(naga::back::msl::BindSamplerTarget::Resource(
info.counters.samplers as _,
));
info.counters.samplers += 1;
}
wgt::BindingType::Texture { .. } => {
target.texture = Some(info.counters.textures as _);
info.counters.textures += 1;
}
wgt::BindingType::StorageTexture { access, .. } => {
target.texture = Some(info.counters.textures as _);
info.counters.textures += 1;
target.mutable = match access {
wgt::StorageTextureAccess::ReadOnly => false,
wgt::StorageTextureAccess::WriteOnly => true,
wgt::StorageTextureAccess::ReadWrite => true,
};
}
}
let source = naga::back::msl::BindSource {
stage: info.stage,
group: group_index as u32,
binding: entry.binding,
};
binding_map.insert(source, target);
}
}
bind_group_infos.push(super::BindGroupLayoutInfo {
base_resource_indices,
dynamic_buffers,
sized_buffer_bindings,
});
}
// Finally, make sure we fit the limits
for info in stage_data.iter_mut() {
// handle the sizes buffer assignment and shader overrides
if info.sizes_count != 0 {
info.sizes_buffer = Some(info.counters.buffers);
info.counters.buffers += 1;
}
if info.counters.buffers > self.shared.private_caps.max_buffers_per_stage
|| info.counters.textures > self.shared.private_caps.max_textures_per_stage
|| info.counters.samplers > self.shared.private_caps.max_samplers_per_stage
{
log::error!("Resource limit exceeded: {:?}", info);
return Err(crate::DeviceError::OutOfMemory);
}
}
let per_stage_map = stage_data.map(|info| naga::back::msl::PerStageResources {
push_constant_buffer: info
.pc_buffer
.map(|buffer_index| buffer_index as naga::back::msl::Slot),
sizes_buffer: info
.sizes_buffer
.map(|buffer_index| buffer_index as naga::back::msl::Slot),
});
let naga_options = naga::back::msl::Options {
lang_version: match self.shared.private_caps.msl_version {
mtl::MTLLanguageVersion::V1_0 => (1, 0),
mtl::MTLLanguageVersion::V1_1 => (1, 1),
mtl::MTLLanguageVersion::V1_2 => (1, 2),
mtl::MTLLanguageVersion::V2_0 => (2, 0),
mtl::MTLLanguageVersion::V2_1 => (2, 1),
mtl::MTLLanguageVersion::V2_2 => (2, 2),
mtl::MTLLanguageVersion::V2_3 => (2, 3),
},
binding_map,
inline_samplers: Default::default(),
spirv_cross_compatibility: false,
fake_missing_bindings: false,
per_stage_map: naga::back::msl::PerStageMap {
vs: per_stage_map.vs,
fs: per_stage_map.fs,
cs: per_stage_map.cs,
},
};
Ok(super::PipelineLayout {
naga_options,
bind_group_infos,
push_constants_infos: stage_data.map(|info| {
info.pc_buffer
.map(|buffer_index| super::PushConstantsStage {
count: info.pc_limit,
buffer_index,
})
}),
})
}
unsafe fn destroy_pipeline_layout(&self, pipeline_layout: Resource) {}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}
unsafe fn create_bind_group(
&self,
desc: &crate::BindGroupDescriptor<Api>,
) -> DeviceResult<Resource> {
Ok(Resource)
desc: &crate::BindGroupDescriptor<super::Api>,
) -> DeviceResult<super::BindGroup> {
//TODO: avoid heap allocation
let mut entries = desc.entries.to_vec();
entries.sort_by_key(|e| e.binding);
let mut bg = super::BindGroup::default();
for (&stage, counter) in super::NAGA_STAGES.iter().zip(bg.counters.iter_mut()) {
let stage_bit = crate::aux::map_naga_stage(stage);
for entry in entries.iter() {
let layout = &desc.layout.entries[&entry.binding];
if !layout.visibility.contains(stage_bit) {
continue;
}
match layout.ty {
wgt::BindingType::Buffer { .. } => {
let source = &desc.buffers[entry.resource_index as usize];
bg.buffers.push(super::BufferResource {
ptr: source.buffer.as_raw(),
offset: source.offset,
});
counter.buffers += 1;
}
wgt::BindingType::Sampler { .. } => {
let res = desc.samplers[entry.resource_index as usize].as_raw();
bg.samplers.push(res);
counter.samplers += 1;
}
wgt::BindingType::Texture { .. } | wgt::BindingType::StorageTexture { .. } => {
let res = desc.textures[entry.resource_index as usize].view.as_raw();
bg.textures.push(res);
counter.textures += 1;
}
}
}
}
Ok(bg)
}
unsafe fn destroy_bind_group(&self, group: Resource) {}
unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {}
unsafe fn create_shader_module(
&self,

View File

@ -1,18 +1,17 @@
#![allow(unused_variables)]
mod adapter;
mod command;
mod conv;
mod device;
mod surface;
use std::{ops::Range, ptr::NonNull, sync::Arc, thread};
use std::{iter, ops, ptr::NonNull, sync::Arc, thread};
use arrayvec::ArrayVec;
use foreign_types::ForeignTypeRef as _;
use parking_lot::Mutex;
#[derive(Clone)]
pub struct Api;
pub struct Context;
pub struct Encoder;
#[derive(Debug)]
pub struct Resource;
@ -32,13 +31,13 @@ impl crate::Api for Api {
type Texture = Texture;
type SurfaceTexture = SurfaceTexture;
type TextureView = TextureView;
type Sampler = Resource;
type Sampler = Sampler;
type QuerySet = Resource;
type Fence = Resource;
type BindGroupLayout = Resource;
type BindGroup = Resource;
type PipelineLayout = Resource;
type BindGroupLayout = BindGroupLayout;
type BindGroup = BindGroup;
type PipelineLayout = PipelineLayout;
type ShaderModule = Resource;
type RenderPipeline = Resource;
type ComputePipeline = Resource;
@ -280,6 +279,12 @@ pub struct Buffer {
unsafe impl Send for Buffer {}
unsafe impl Sync for Buffer {}
impl Buffer {
    /// Non-null raw pointer to the underlying `MTLBuffer` object.
    fn as_raw(&self) -> BufferPtr {
        // SAFETY: `raw` is an owned, live Metal object, so its backing
        // pointer is assumed non-null.
        unsafe { NonNull::new_unchecked(self.raw.as_ptr()) }
    }
}
#[derive(Debug)]
pub struct Texture {
raw: mtl::Texture,
@ -300,162 +305,166 @@ pub struct TextureView {
unsafe impl Send for TextureView {}
unsafe impl Sync for TextureView {}
impl crate::CommandBuffer<Api> for Encoder {
unsafe fn finish(&mut self) {}
unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
where
T: Iterator<Item = crate::BufferBarrier<'a, Api>>,
{
impl TextureView {
fn as_raw(&self) -> TexturePtr {
unsafe { NonNull::new_unchecked(self.raw.as_ptr()) }
}
unsafe fn transition_textures<'a, T>(&mut self, barriers: T)
where
T: Iterator<Item = crate::TextureBarrier<'a, Api>>,
{
}
unsafe fn fill_buffer(&mut self, buffer: &Resource, range: crate::MemoryRange, value: u8) {}
unsafe fn copy_buffer_to_buffer<T>(&mut self, src: &Resource, dst: &Resource, regions: T)
where
T: Iterator<Item = crate::BufferCopy>,
{
}
/// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
unsafe fn copy_texture_to_texture<T>(
&mut self,
src: &Resource,
src_usage: crate::TextureUse,
dst: &Resource,
regions: T,
) {
}
/// Note: `dst` current usage has to be `TextureUse::COPY_DST`.
unsafe fn copy_buffer_to_texture<T>(&mut self, src: &Resource, dst: &Resource, regions: T) {}
unsafe fn copy_texture_to_buffer<T>(
&mut self,
src: &Resource,
src_usage: crate::TextureUse,
dst: &Resource,
regions: T,
) {
}
unsafe fn begin_query(&mut self, set: &Resource, index: u32) {}
unsafe fn end_query(&mut self, set: &Resource, index: u32) {}
unsafe fn write_timestamp(&mut self, set: &Resource, index: u32) {}
unsafe fn reset_queries(&mut self, set: &Resource, range: Range<u32>) {}
unsafe fn copy_query_results(
&mut self,
set: &Resource,
range: Range<u32>,
buffer: &Resource,
offset: wgt::BufferAddress,
) {
}
// render
unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<Api>) {}
unsafe fn end_render_pass(&mut self) {}
unsafe fn set_bind_group(
&mut self,
layout: &Resource,
index: u32,
group: &Resource,
dynamic_offsets: &[wgt::DynamicOffset],
) {
}
unsafe fn set_push_constants(
&mut self,
layout: &Resource,
stages: wgt::ShaderStage,
offset: u32,
data: &[u32],
) {
}
unsafe fn insert_debug_marker(&mut self, label: &str) {}
unsafe fn begin_debug_marker(&mut self, group_label: &str) {}
unsafe fn end_debug_marker(&mut self) {}
unsafe fn set_render_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn set_index_buffer<'a>(
&mut self,
binding: crate::BufferBinding<'a, Api>,
format: wgt::IndexFormat,
) {
}
unsafe fn set_vertex_buffer<'a>(&mut self, index: u32, binding: crate::BufferBinding<'a, Api>) {
}
unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {}
unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {}
unsafe fn set_stencil_reference(&mut self, value: u32) {}
unsafe fn set_blend_constants(&mut self, color: &wgt::Color) {}
unsafe fn draw(
&mut self,
start_vertex: u32,
vertex_count: u32,
start_instance: u32,
instance_count: u32,
) {
}
unsafe fn draw_indexed(
&mut self,
start_index: u32,
index_count: u32,
base_vertex: i32,
start_instance: u32,
instance_count: u32,
) {
}
unsafe fn draw_indirect(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
draw_count: u32,
) {
}
unsafe fn draw_indexed_indirect(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
draw_count: u32,
) {
}
unsafe fn draw_indirect_count(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
count_buffer: &Resource,
count_offset: wgt::BufferAddress,
max_count: u32,
) {
}
unsafe fn draw_indexed_indirect_count(
&mut self,
buffer: &Resource,
offset: wgt::BufferAddress,
count_buffer: &Resource,
count_offset: wgt::BufferAddress,
max_count: u32,
) {
}
// compute
unsafe fn begin_compute_pass(&mut self) {}
unsafe fn end_compute_pass(&mut self) {}
unsafe fn set_compute_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn dispatch(&mut self, count: [u32; 3]) {}
unsafe fn dispatch_indirect(&mut self, buffer: &Resource, offset: wgt::BufferAddress) {}
}
/// A created Metal sampler state object.
#[derive(Debug)]
pub struct Sampler {
    raw: mtl::SamplerState,
}
// NOTE(review): these impls assert the wrapped Objective-C object may be
// shared/sent across threads — confirm against Metal's thread-safety rules.
unsafe impl Send for Sampler {}
unsafe impl Sync for Sampler {}
impl Sampler {
    /// Non-null raw pointer to the underlying `MTLSamplerState`.
    fn as_raw(&self) -> SamplerPtr {
        // SAFETY: `raw` is an owned, live Metal object, so its backing
        // pointer is assumed non-null.
        unsafe { NonNull::new_unchecked(self.raw.as_ptr()) }
    }
}
// Bind-group-layout entries keyed by their binding index.
type BindingMap = fxhash::FxHashMap<u32, wgt::BindGroupLayoutEntry>;
#[derive(Debug)]
pub struct BindGroupLayout {
    // Shared via `Arc` so other objects can hold the map cheaply.
    entries: Arc<BindingMap>,
}
/// One value per resource kind: buffers, textures, and samplers.
/// Used e.g. as per-kind counters when assigning Metal resource indices.
#[derive(Clone, Debug, Default)]
struct ResourceData<T> {
    buffers: T,
    textures: T,
    samplers: T,
}
/// One value per shader stage: vertex, fragment, and compute.
#[derive(Clone, Debug, Default)]
struct MultiStageData<T> {
    vs: T,
    fs: T,
    cs: T,
}
/// The naga stage corresponding to each field of `MultiStageData`.
const NAGA_STAGES: MultiStageData<naga::ShaderStage> = MultiStageData {
    vs: naga::ShaderStage::Vertex,
    fs: naga::ShaderStage::Fragment,
    cs: naga::ShaderStage::Compute,
};
impl<T> ops::Index<naga::ShaderStage> for MultiStageData<T> {
    type Output = T;
    /// Select the per-stage value matching `stage`.
    fn index(&self, stage: naga::ShaderStage) -> &T {
        match stage {
            naga::ShaderStage::Vertex => &self.vs,
            naga::ShaderStage::Fragment => &self.fs,
            naga::ShaderStage::Compute => &self.cs,
        }
    }
}
impl<T> MultiStageData<T> {
    /// Build a new per-stage container by applying `fun` to each stage's value.
    fn map<Y>(&self, fun: impl Fn(&T) -> Y) -> MultiStageData<Y> {
        MultiStageData {
            vs: fun(&self.vs),
            fs: fun(&self.fs),
            cs: fun(&self.cs),
        }
    }
    /// Visit the stages in fixed order: vertex, fragment, compute.
    fn iter<'a>(&'a self) -> impl Iterator<Item = &'a T> {
        let MultiStageData { vs, fs, cs } = self;
        iter::once(vs).chain(iter::once(fs)).chain(iter::once(cs))
    }
    /// Mutable counterpart of `iter`, same stage order.
    fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut T> {
        let MultiStageData { vs, fs, cs } = self;
        iter::once(vs).chain(iter::once(fs)).chain(iter::once(cs))
    }
}
// Resource counters for every stage and resource kind.
type MultiStageResourceCounters = MultiStageData<ResourceData<ResourceIndex>>;
/// Per-bind-group data recorded while building a pipeline layout.
#[derive(Debug)]
struct BindGroupLayoutInfo {
    // Where this group's resources start in each stage's index space.
    base_resource_indices: MultiStageResourceCounters,
    // Per-stage buffer indices of dynamically-offset buffers
    // (`!0` marks a stage that does not see the binding).
    dynamic_buffers: Vec<MultiStageData<ResourceIndex>>,
    // Storage-buffer bindings (binding index, stage visibility) whose sizes
    // are passed to the shader via the sizes buffer.
    sized_buffer_bindings: Vec<(u32, wgt::ShaderStage)>,
}
/// Push-constant placement for a single shader stage.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct PushConstantsStage {
    // Number of 4-byte push-constant words (ranges are divided by 4).
    count: u32,
    // Buffer index that holds the push-constant data for this stage.
    buffer_index: ResourceIndex,
}
#[derive(Debug)]
pub struct PipelineLayout {
    // Options handed to naga's MSL backend when compiling shader modules.
    naga_options: naga::back::msl::Options,
    bind_group_infos: ArrayVec<[BindGroupLayoutInfo; crate::MAX_BIND_GROUPS]>,
    // Per-stage push-constant placement; `None` when a stage has none.
    push_constants_infos: MultiStageData<Option<PushConstantsStage>>,
}
/// Conversion between a raw `NonNull` pointer and its `metal-rs`
/// reference type.
trait AsNative {
    type Native;
    fn from(native: &Self::Native) -> Self;
    fn as_native(&self) -> &Self::Native;
}
// Thin non-null pointers to raw Metal object types.
type BufferPtr = NonNull<mtl::MTLBuffer>;
type TexturePtr = NonNull<mtl::MTLTexture>;
type SamplerPtr = NonNull<mtl::MTLSamplerState>;
type ResourcePtr = NonNull<mtl::MTLResource>;
impl AsNative for BufferPtr {
    type Native = mtl::BufferRef;
    #[inline]
    fn from(native: &Self::Native) -> Self {
        // SAFETY: `native` is a live reference, so its backing pointer
        // is non-null.
        unsafe { NonNull::new_unchecked(native.as_ptr()) }
    }
    #[inline]
    fn as_native(&self) -> &Self::Native {
        // SAFETY: the pointer originated from a valid reference in `from`.
        unsafe { Self::Native::from_ptr(self.as_ptr()) }
    }
}
impl AsNative for TexturePtr {
    type Native = mtl::TextureRef;
    #[inline]
    fn from(native: &Self::Native) -> Self {
        // SAFETY: `native` is a live reference, so its backing pointer
        // is non-null.
        unsafe { NonNull::new_unchecked(native.as_ptr()) }
    }
    #[inline]
    fn as_native(&self) -> &Self::Native {
        // SAFETY: the pointer originated from a valid reference in `from`.
        unsafe { Self::Native::from_ptr(self.as_ptr()) }
    }
}
impl AsNative for SamplerPtr {
    type Native = mtl::SamplerStateRef;
    #[inline]
    fn from(native: &Self::Native) -> Self {
        // SAFETY: `native` is a live reference, so its backing pointer
        // is non-null.
        unsafe { NonNull::new_unchecked(native.as_ptr()) }
    }
    #[inline]
    fn as_native(&self) -> &Self::Native {
        // SAFETY: the pointer originated from a valid reference in `from`.
        unsafe { Self::Native::from_ptr(self.as_ptr()) }
    }
}
/// A buffer entry of a bind group: raw pointer plus byte offset.
#[derive(Debug)]
struct BufferResource {
    ptr: BufferPtr,
    offset: wgt::BufferAddress,
}
/// Flattened bind group contents.
#[derive(Debug, Default)]
pub struct BindGroup {
    // Per-stage counts of how many entries of each kind belong to that stage
    // in the vectors below (stages are laid out consecutively).
    counters: MultiStageResourceCounters,
    buffers: Vec<BufferResource>,
    samplers: Vec<SamplerPtr>,
    textures: Vec<TexturePtr>,
}
// NOTE(review): holds raw `NonNull` pointers; these impls assert the
// referenced Metal objects may be shared across threads — confirm.
unsafe impl Send for BindGroup {}
unsafe impl Sync for BindGroup {}

View File

@ -693,10 +693,12 @@ bitflags::bitflags! {
const NON_POWER_OF_TWO_MIPMAPPED_TEXTURES = 0x0000_0010;
/// Supports textures that are cube arrays.
const CUBE_ARRAY_TEXTURES = 0x0000_0020;
/// Supports comparison samplers.
const COMPARISON_SAMPLERS = 0x0000_0040;
/// Supports samplers with anisotropic filtering
const ANISOTROPIC_FILTERING = 0x0001_0000;
/// All flags are in their compliant state.
const COMPLIANT = 0x0000_003F;
const COMPLIANT = 0x0000_007F;
}
}