Remove the old render and compute passes

Dzmitry Malyshau 2020-01-12 00:25:19 -05:00
parent 40ac14e92c
commit e0574ee899
9 changed files with 72 additions and 1411 deletions

View File

@@ -127,7 +127,7 @@ int main(
.todo = 0
});
-WGPURawComputePassId command_pass = //temp name
+WGPUComputePassId command_pass =
wgpu_command_encoder_begin_compute_pass(encoder, NULL);
wgpu_compute_pass_set_pipeline(command_pass, compute_pipeline);

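For orientation, this is the full compute-pass flow after the rename. A minimal sketch only: `encoder` and `compute_pipeline` come from the surrounding example, and the (pass, x, y, z) shape of wgpu_compute_pass_dispatch is an assumption, since its declaration is not part of this diff.

WGPUComputePassId pass =
    wgpu_command_encoder_begin_compute_pass(encoder, NULL);
wgpu_compute_pass_set_pipeline(pass, compute_pipeline);
wgpu_compute_pass_dispatch(pass, 64, 1, 1); // assumed signature: workgroup counts in x, y, z
wgpu_compute_pass_end_pass(pass);           // hands the recorded commands back to the encoder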
View File

@@ -249,7 +249,7 @@ int main() {
},
};
-WGPURawRenderPassId rpass =
+WGPURenderPassId rpass =
wgpu_command_encoder_begin_render_pass(cmd_encoder,
&(WGPURenderPassDescriptor){
.color_attachments = color_attachments,

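The render side mirrors this. A sketch of recording with the renamed entry points, based on the declarations in the header diff below; `render_pipeline` is assumed to be created elsewhere, and `desc` stands in for the inline WGPURenderPassDescriptor above.

WGPURenderPassId rpass =
    wgpu_command_encoder_begin_render_pass(cmd_encoder, &desc);
wgpu_render_pass_set_pipeline(rpass, render_pipeline);
wgpu_render_pass_draw(rpass, 3, 1, 0, 0); // vertex_count, instance_count, first_vertex, first_instance
wgpu_render_pass_end_pass(rpass);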
View File

@@ -303,8 +303,6 @@ typedef struct {
WGPUCommandEncoderId parent;
} WGPURawPass;
-typedef WGPURawPass *WGPURawComputePassId;
typedef struct {
uint32_t todo;
} WGPUComputePassDescriptor;
@@ -358,8 +356,6 @@ typedef struct {
WGPURawRenderTargets targets;
} WGPURawRenderPass;
-typedef WGPURawRenderPass *WGPURawRenderPassId;
typedef const WGPUTextureViewId *WGPUOptionRef_TextureViewId;
typedef struct {
@@ -413,6 +409,8 @@ typedef struct {
uint32_t todo;
} WGPUCommandBufferDescriptor;
+typedef WGPURawPass *WGPUComputePassId;
typedef const char *WGPURawString;
typedef uint64_t WGPUId_ComputePipeline_Dummy;
@@ -680,6 +678,8 @@ typedef struct {
typedef WGPUDeviceId WGPUQueueId;
+typedef WGPURawRenderPass *WGPURenderPassId;
typedef uint64_t WGPUId_RenderBundle_Dummy;
typedef WGPUId_RenderBundle_Dummy WGPURenderBundleId;
@@ -729,11 +729,11 @@ void wgpu_buffer_unmap(WGPUBufferId buffer_id);
void wgpu_command_buffer_destroy(WGPUCommandBufferId command_buffer_id);
-WGPURawComputePassId wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId encoder_id,
-const WGPUComputePassDescriptor *_desc);
+WGPURawPass *wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId encoder_id,
+const WGPUComputePassDescriptor *_desc);
-WGPURawRenderPassId wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId encoder_id,
-const WGPURenderPassDescriptor *desc);
+WGPURawRenderPass *wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId encoder_id,
+const WGPURenderPassDescriptor *desc);
void wgpu_command_encoder_copy_buffer_to_buffer(WGPUCommandEncoderId command_encoder_id,
WGPUBufferId source,
@@ -771,7 +771,7 @@ void wgpu_compute_pass_dispatch_indirect(WGPURawPass *pass,
WGPUBufferId buffer_id,
WGPUBufferAddress offset);
-void wgpu_compute_pass_end_pass(WGPURawComputePassId pass_id);
+void wgpu_compute_pass_end_pass(WGPUComputePassId pass_id);
void wgpu_compute_pass_insert_debug_marker(WGPURawPass *_pass, WGPURawString _label);
@@ -842,53 +842,17 @@ void wgpu_queue_submit(WGPUQueueId queue_id,
const WGPUCommandBufferId *command_buffers,
uintptr_t command_buffers_length);
-void wgpu_raw_render_pass_draw(WGPURawRenderPass *pass,
-uint32_t vertex_count,
-uint32_t instance_count,
-uint32_t first_vertex,
-uint32_t first_instance);
+void wgpu_render_pass_draw(WGPURawRenderPass *pass,
+uint32_t vertex_count,
+uint32_t instance_count,
+uint32_t first_vertex,
+uint32_t first_instance);
-void wgpu_raw_render_pass_draw_indirect(WGPURawRenderPass *pass,
-WGPUBufferId buffer_id,
-WGPUBufferAddress offset);
+void wgpu_render_pass_draw_indirect(WGPURawRenderPass *pass,
+WGPUBufferId buffer_id,
+WGPUBufferAddress offset);
-void wgpu_raw_render_pass_set_bind_group(WGPURawRenderPass *pass,
-uint32_t index,
-WGPUBindGroupId bind_group_id,
-const WGPUBufferAddress *offsets,
-uintptr_t offset_length);
-void wgpu_raw_render_pass_set_blend_color(WGPURawRenderPass *pass, const WGPUColor *color);
-void wgpu_raw_render_pass_set_index_buffer(WGPURawRenderPass *pass,
-WGPUBufferId buffer_id,
-WGPUBufferAddress offset);
-void wgpu_raw_render_pass_set_pipeline(WGPURawRenderPass *pass, WGPURenderPipelineId pipeline_id);
-void wgpu_raw_render_pass_set_scissor(WGPURawRenderPass *pass,
-uint32_t x,
-uint32_t y,
-uint32_t w,
-uint32_t h);
-void wgpu_raw_render_pass_set_stencil_reference(WGPURawRenderPass *pass, uint32_t value);
-void wgpu_raw_render_pass_set_vertex_buffers(WGPURawRenderPass *pass,
-uint32_t start_slot,
-const WGPUBufferId *buffer_ids,
-const WGPUBufferAddress *offsets,
-uintptr_t length);
-void wgpu_raw_render_pass_set_viewport(WGPURawRenderPass *pass,
-float x,
-float y,
-float w,
-float h,
-float depth_min,
-float depth_max);
-void wgpu_render_pass_end_pass(WGPURawRenderPassId pass_id);
+void wgpu_render_pass_end_pass(WGPURenderPassId pass_id);
void wgpu_render_pass_execute_bundles(WGPURawRenderPass *_pass,
const WGPURenderBundleId *_bundles,
@@ -900,6 +864,42 @@ void wgpu_render_pass_pop_debug_group(WGPURawRenderPass *_pass);
void wgpu_render_pass_push_debug_group(WGPURawRenderPass *_pass, WGPURawString _label);
+void wgpu_render_pass_set_bind_group(WGPURawRenderPass *pass,
+uint32_t index,
+WGPUBindGroupId bind_group_id,
+const WGPUBufferAddress *offsets,
+uintptr_t offset_length);
+void wgpu_render_pass_set_blend_color(WGPURawRenderPass *pass, const WGPUColor *color);
+void wgpu_render_pass_set_index_buffer(WGPURawRenderPass *pass,
+WGPUBufferId buffer_id,
+WGPUBufferAddress offset);
+void wgpu_render_pass_set_pipeline(WGPURawRenderPass *pass, WGPURenderPipelineId pipeline_id);
+void wgpu_render_pass_set_scissor(WGPURawRenderPass *pass,
+uint32_t x,
+uint32_t y,
+uint32_t w,
+uint32_t h);
+void wgpu_render_pass_set_stencil_reference(WGPURawRenderPass *pass, uint32_t value);
+void wgpu_render_pass_set_vertex_buffers(WGPURawRenderPass *pass,
+uint32_t start_slot,
+const WGPUBufferId *buffer_ids,
+const WGPUBufferAddress *offsets,
+uintptr_t length);
+void wgpu_render_pass_set_viewport(WGPURawRenderPass *pass,
+float x,
+float y,
+float w,
+float h,
+float depth_min,
+float depth_max);
void wgpu_request_adapter_async(const WGPURequestAdapterOptions *desc,
WGPUBackendBit mask,
WGPURequestAdapterCallback callback,

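Note how buffer handles stay integer ids while pass ids became pointers: the indirect variants take a WGPUBufferId next to the pass pointer. A small sketch reusing `pass` and `rpass` from the sketches above, with `indirect_buf` standing in for a buffer assumed to be created with INDIRECT usage:

wgpu_compute_pass_dispatch_indirect(pass, indirect_buf, 0); // offset 0 into the parameter buffer
wgpu_render_pass_draw_indirect(rpass, indirect_buf, 0);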
View File

@@ -9,12 +9,10 @@ use crate::{
PhantomSlice,
},
device::{all_buffer_stages, BIND_BUFFER_ALIGNMENT},
-hub::{GfxBackend, Global, IdentityFilter, Token},
+hub::{GfxBackend, Global, Token},
id,
resource::BufferUsage,
-track::TrackerSet,
BufferAddress,
-Stored,
};
use hal::command::CommandBuffer as _;
@@ -56,48 +54,8 @@ pub struct ComputePassDescriptor {
pub todo: u32,
}
#[derive(Debug)]
pub struct ComputePass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<id::CommandBufferId>,
binder: Binder,
trackers: TrackerSet,
}
impl<B: hal::Backend> ComputePass<B> {
pub(crate) fn new(
raw: B::CommandBuffer,
cmb_id: Stored<id::CommandBufferId>,
trackers: TrackerSet,
max_bind_groups: u32,
) -> Self {
ComputePass {
raw,
cmb_id,
binder: Binder::new(max_bind_groups),
trackers,
}
}
}
// Common routines between render/compute
impl<F: IdentityFilter<id::ComputePassId>> Global<F> {
pub fn compute_pass_end_pass<B: GfxBackend>(&self, pass_id: id::ComputePassId) {
let mut token = Token::root();
let hub = B::hub(self);
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let (pass, _) = hub.compute_passes.unregister(pass_id, &mut token);
let cmb = &mut cmb_guard[pass.cmb_id.value];
// There are no transitions to be made: we've already been inserting barriers
// into the parent command buffer while recording this compute pass.
log::debug!("Compute pass {:?} {:#?}", pass_id, pass.trackers);
cmb.trackers = pass.trackers;
cmb.raw.push(pass.raw);
}
}
impl<F> Global<F> {
pub fn command_encoder_run_compute_pass<B: GfxBackend>(
&self,
@@ -253,183 +211,6 @@ impl<F> Global<F> {
}
}
}
pub fn compute_pass_set_bind_group<B: GfxBackend>(
&self,
pass_id: id::ComputePassId,
index: u32,
bind_group_id: id::BindGroupId,
offsets: &[BufferAddress],
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
}
//Note: currently, WebGPU compute passes have synchronization defined
// at a dispatch granularity, so we insert the necessary barriers here.
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
log::trace!(
"Encoding barriers on binding of {:?} in pass {:?}",
bind_group_id,
pass_id
);
CommandBuffer::insert_barriers(
&mut pass.raw,
&mut pass.trackers,
&bind_group.used,
&*buffer_guard,
&*texture_guard,
);
if let Some((pipeline_layout_id, follow_ups)) = pass
.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_ups.clone().map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()));
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_ups.flat_map(|(_, offsets)| offsets))
.map(|&off| off as hal::command::DescriptorSetOffset),
);
}
};
}
// Compute-specific routines
pub fn compute_pass_dispatch<B: GfxBackend>(
&self,
pass_id: id::ComputePassId,
x: u32,
y: u32,
z: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.compute_passes.write(&mut token);
unsafe {
pass_guard[pass_id].raw.dispatch([x, y, z]);
}
}
pub fn compute_pass_dispatch_indirect<B: GfxBackend>(
&self,
pass_id: id::ComputePassId,
indirect_buffer_id: id::BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
let (src_buffer, src_pending) = pass.trackers.buffers.use_replace(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
);
assert!(src_buffer.usage.contains(BufferUsage::INDIRECT));
let barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
unsafe {
pass.raw.pipeline_barrier(
all_buffer_stages() .. all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
pass.raw.dispatch_indirect(&src_buffer.raw, indirect_offset);
}
}
pub fn compute_pass_set_pipeline<B: GfxBackend>(
&self,
pass_id: id::ComputePassId,
pipeline_id: id::ComputePipelineId,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
unsafe {
pass.raw.bind_compute_pipeline(&pipeline.raw);
}
// Rebind resources
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
pass.binder.pipeline_layout_id = Some(pipeline.layout_id);
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().map(|offset| *offset as u32),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
}
}
}
}
}
mod ffi {

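One rule from the deleted code still binds C callers: set_bind_group asserted every dynamic offset to be a multiple of BIND_BUFFER_ALIGNMENT. A sketch of a conforming call, assuming the compute variant mirrors wgpu_render_pass_set_bind_group from the header above, that `bind_group` exists, and that 256 satisfies the alignment on this build:

WGPUBufferAddress offsets[1] = { 256 }; // each must be a multiple of BIND_BUFFER_ALIGNMENT
wgpu_compute_pass_set_bind_group(pass, 0, bind_group, offsets, 1);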
View File

@@ -14,35 +14,21 @@ pub use self::render::*;
pub use self::transfer::*;
use crate::{
conv,
device::{
MAX_COLOR_TARGETS,
all_buffer_stages,
all_image_stages,
FramebufferKey,
RenderPassContext,
RenderPassKey,
},
hub::{GfxBackend, Global, IdentityFilter, Storage, Token},
hub::{GfxBackend, Global, Storage, Token},
id,
resource::{Buffer, Texture, TextureUsage, TextureViewInner},
resource::{Buffer, Texture},
track::TrackerSet,
Features,
LifeGuard,
Stored,
};
use arrayvec::ArrayVec;
use hal::{
adapter::PhysicalDevice as _,
command::CommandBuffer as _,
device::Device as _,
};
use std::{
borrow::Borrow,
collections::hash_map::Entry,
iter,
marker::PhantomData,
mem,
slice,
@@ -50,9 +36,6 @@ use std::{
};
-pub type RawRenderPassId = *mut RawRenderPass;
-pub type RawComputePassId = *mut RawPass;
#[derive(Clone, Copy, Debug, peek_poke::PeekCopy, peek_poke::Poke)]
struct PhantomSlice<T>(PhantomData<T>);
@@ -165,6 +148,8 @@ impl<B: GfxBackend> CommandBuffer<B> {
buffer_guard: &Storage<Buffer<B>, id::BufferId>,
texture_guard: &Storage<Texture<B>, id::TextureId>,
) {
+use hal::command::CommandBuffer as _;
debug_assert_eq!(B::VARIANT, base.backend());
debug_assert_eq!(B::VARIANT, head.backend());
@@ -215,7 +200,7 @@ pub struct CommandBufferDescriptor {
pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass(
encoder_id: id::CommandEncoderId,
_desc: Option<&ComputePassDescriptor>,
-) -> RawComputePassId {
+) -> *mut RawPass {
let pass = RawPass::new_compute(encoder_id);
Box::into_raw(Box::new(pass))
}
@@ -239,7 +224,7 @@ pub struct RawRenderPass {
pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: id::CommandEncoderId,
desc: &RenderPassDescriptor,
-) -> RawRenderPassId {
+) -> *mut RawRenderPass {
let mut colors: [RawRenderPassColorAttachmentDescriptor; MAX_COLOR_TARGETS] = mem::zeroed();
for (color, at) in colors
.iter_mut()
@@ -286,518 +271,3 @@ impl<F> Global<F> {
encoder_id
}
}
impl<F: IdentityFilter<id::RenderPassId>> Global<F> {
pub fn command_encoder_begin_render_pass<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
desc: &RenderPassDescriptor,
id_in: F::Input,
) -> id::RenderPassId {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let device = &device_guard[cmb.device_id.value];
let limits = adapter_guard[device.adapter_id]
.raw
.physical_device
.limits();
let samples_count_limit = limits.framebuffer_color_sample_counts;
let mut current_comb = device.com_allocator.extend(cmb);
unsafe {
current_comb.begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
hal::command::CommandBufferInheritanceInfo::default(),
);
}
let pass = {
let (_, mut token) = hub.buffers.read(&mut token); //skip token
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (view_guard, _) = hub.texture_views.read(&mut token);
let mut extent = None;
let mut used_swap_chain_image = None::<Stored<id::TextureViewId>>;
let color_attachments = unsafe {
slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length)
};
let depth_stencil_attachment = desc.depth_stencil_attachment;
let sample_count = color_attachments
.get(0)
.map(|at| view_guard[at.attachment].samples)
.unwrap_or(1);
assert!(
sample_count & samples_count_limit != 0,
"Attachment sample_count must be supported by physical device limits"
);
const MAX_TOTAL_ATTACHMENTS: usize = 10;
type OutputAttachment<'a> = (
&'a Stored<id::TextureId>,
&'a hal::image::SubresourceRange,
Option<TextureUsage>,
);
let mut output_attachments = ArrayVec::<[OutputAttachment; MAX_TOTAL_ATTACHMENTS]>::new();
log::trace!(
"Encoding render pass begin in command buffer {:?}",
encoder_id
);
let rp_key = {
let depth_stencil = match depth_stencil_attachment {
Some(at) => {
let view = cmb.trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
let source_id = match view.inner {
TextureViewInner::Native { ref source_id, .. } => source_id,
TextureViewInner::SwapChain { .. } => {
panic!("Unexpected depth/stencil use of swapchain image!")
}
};
// Using render pass for transition.
let consistent_usage = cmb.trackers.textures.query(
source_id.value,
view.range.clone(),
);
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(
usage,
hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL,
).1,
None => hal::image::Layout::DepthStencilAttachmentOptimal,
};
Some(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: conv::map_load_store_ops(at.depth_load_op, at.depth_store_op),
stencil_ops: conv::map_load_store_ops(
at.stencil_load_op,
at.stencil_store_op,
),
layouts: old_layout .. hal::image::Layout::DepthStencilAttachmentOptimal,
})
}
None => None,
};
let mut colors = ArrayVec::new();
let mut resolves = ArrayVec::new();
for at in color_attachments {
let view = &view_guard[at.attachment];
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
assert_eq!(
view.samples, sample_count,
"All attachments must have the same sample_count"
);
let first_use = cmb.trackers.views.init(
at.attachment,
view.life_guard.add_ref(),
PhantomData,
).is_ok();
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let consistent_usage = cmb.trackers.textures.query(
source_id.value,
view.range.clone(),
);
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(usage, hal::format::Aspects::COLOR).1,
None => hal::image::Layout::ColorAttachmentOptimal,
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { .. } => {
if let Some((ref view_id, _)) = cmb.used_swap_chain {
assert_eq!(view_id.value, at.attachment);
} else {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: at.attachment,
ref_count: view.life_guard.add_ref(),
});
}
let end = hal::image::Layout::Present;
let start = if first_use {
hal::image::Layout::Undefined
} else {
end
};
start .. end
}
};
colors.push(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: conv::map_load_store_ops(at.load_op, at.store_op),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts,
});
}
for &resolve_target in color_attachments
.iter()
.flat_map(|at| at.resolve_target)
{
let view = &view_guard[resolve_target];
assert_eq!(extent, Some(view.extent));
assert_eq!(
view.samples, 1,
"All resolve_targets must have a sample_count of 1"
);
let first_use = cmb.trackers.views.init(
resolve_target,
view.life_guard.add_ref(),
PhantomData,
).is_ok();
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
let consistent_usage = cmb.trackers.textures.query(
source_id.value,
view.range.clone(),
);
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(usage, hal::format::Aspects::COLOR).1,
None => hal::image::Layout::ColorAttachmentOptimal,
};
old_layout .. hal::image::Layout::ColorAttachmentOptimal
}
TextureViewInner::SwapChain { .. } => {
if let Some((ref view_id, _)) = cmb.used_swap_chain {
assert_eq!(view_id.value, resolve_target);
} else {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: resolve_target,
ref_count: view.life_guard.add_ref(),
});
}
let end = hal::image::Layout::Present;
let start = if first_use {
hal::image::Layout::Undefined
} else {
end
};
start .. end
}
};
resolves.push(hal::pass::Attachment {
format: Some(conv::map_texture_format(view.format, device.features)),
samples: view.samples,
ops: hal::pass::AttachmentOps::new(
hal::pass::AttachmentLoadOp::DontCare,
hal::pass::AttachmentStoreOp::Store,
),
stencil_ops: hal::pass::AttachmentOps::DONT_CARE,
layouts,
});
}
RenderPassKey {
colors,
resolves,
depth_stencil,
}
};
let mut trackers = TrackerSet::new(B::VARIANT);
for (source_id, view_range, consistent_usage) in output_attachments {
let texture = &texture_guard[source_id.value];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let usage = consistent_usage.unwrap_or(TextureUsage::OUTPUT_ATTACHMENT);
// this is important to record the `first` state.
let _ = trackers.textures.change_replace(
source_id.value,
&source_id.ref_count,
view_range.clone(),
usage,
);
if consistent_usage.is_some() {
// If we expect the texture to be transited to a new state by the
// render pass configuration, make the tracker aware of that.
let _ = trackers.textures.change_replace(
source_id.value,
&source_id.ref_count,
view_range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
);
};
}
let mut render_pass_cache = device.render_passes.lock();
let render_pass = match render_pass_cache.entry(rp_key.clone()) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let color_ids = [
(0, hal::image::Layout::ColorAttachmentOptimal),
(1, hal::image::Layout::ColorAttachmentOptimal),
(2, hal::image::Layout::ColorAttachmentOptimal),
(3, hal::image::Layout::ColorAttachmentOptimal),
];
let mut resolve_ids = ArrayVec::<[_; MAX_COLOR_TARGETS]>::new();
let mut attachment_index = color_attachments.len();
if color_attachments
.iter()
.any(|at| at.resolve_target.is_some())
{
for (i, at) in color_attachments.iter().enumerate() {
if at.resolve_target.is_none() {
resolve_ids.push((
hal::pass::ATTACHMENT_UNUSED,
hal::image::Layout::ColorAttachmentOptimal,
));
} else {
let sample_count_check =
view_guard[color_attachments[i].attachment].samples;
assert!(sample_count_check > 1, "RenderPassColorAttachmentDescriptor with a resolve_target must have an attachment with sample_count > 1");
resolve_ids.push((
attachment_index,
hal::image::Layout::ColorAttachmentOptimal,
));
attachment_index += 1;
}
}
}
let depth_id = (
attachment_index,
hal::image::Layout::DepthStencilAttachmentOptimal,
);
let subpass = hal::pass::SubpassDesc {
colors: &color_ids[.. color_attachments.len()],
resolves: &resolve_ids,
depth_stencil: depth_stencil_attachment.map(|_| &depth_id),
inputs: &[],
preserves: &[],
};
let pass = unsafe {
device
.raw
.create_render_pass(e.key().all(), &[subpass], &[])
}
.unwrap();
e.insert(pass)
}
};
let mut framebuffer_cache;
let fb_key = FramebufferKey {
colors: color_attachments.iter().map(|at| at.attachment).collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.cloned()
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| at.attachment),
};
let framebuffer = match used_swap_chain_image.take() {
Some(view_id) => {
assert!(cmb.used_swap_chain.is_none());
// Always create a new framebuffer and delete it after presentation.
let attachments = fb_key.all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
TextureViewInner::SwapChain { ref image, .. } => Borrow::borrow(image),
});
let framebuffer = unsafe {
device
.raw
.create_framebuffer(&render_pass, attachments, extent.unwrap())
.unwrap()
};
cmb.used_swap_chain = Some((view_id, framebuffer));
&mut cmb.used_swap_chain.as_mut().unwrap().1
}
None => {
// Cache framebuffers by the device.
framebuffer_cache = device.framebuffers.lock();
match framebuffer_cache.entry(fb_key) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let fb = {
let attachments =
e.key().all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
TextureViewInner::SwapChain { ref image, .. } => {
Borrow::borrow(image)
}
});
unsafe {
device.raw.create_framebuffer(
&render_pass,
attachments,
extent.unwrap(),
)
}
.unwrap()
};
e.insert(fb)
}
}
}
};
let rect = {
let ex = extent.unwrap();
hal::pso::Rect {
x: 0,
y: 0,
w: ex.width as _,
h: ex.height as _,
}
};
let clear_values = color_attachments
.iter()
.zip(&rp_key.colors)
.flat_map(|(at, key)| {
match at.load_op {
LoadOp::Load => None,
LoadOp::Clear => {
use hal::format::ChannelType;
//TODO: validate sign/unsign and normalized ranges of the color values
let value = match key.format.unwrap().base_format().1 {
ChannelType::Unorm
| ChannelType::Snorm
| ChannelType::Ufloat
| ChannelType::Sfloat
| ChannelType::Uscaled
| ChannelType::Sscaled
| ChannelType::Srgb => hal::command::ClearColor {
float32: conv::map_color_f32(&at.clear_color),
},
ChannelType::Sint => hal::command::ClearColor {
sint32: conv::map_color_i32(&at.clear_color),
},
ChannelType::Uint => hal::command::ClearColor {
uint32: conv::map_color_u32(&at.clear_color),
},
};
Some(hal::command::ClearValue { color: value })
}
}
})
.chain(depth_stencil_attachment.and_then(|at| {
match (at.depth_load_op, at.stencil_load_op) {
(LoadOp::Load, LoadOp::Load) => None,
(LoadOp::Clear, _) | (_, LoadOp::Clear) => {
let value = hal::command::ClearDepthStencil {
depth: at.clear_depth,
stencil: at.clear_stencil,
};
Some(hal::command::ClearValue {
depth_stencil: value,
})
}
}
}));
unsafe {
current_comb.begin_render_pass(
render_pass,
framebuffer,
rect,
clear_values,
hal::command::SubpassContents::Inline,
);
current_comb.set_scissors(0, iter::once(&rect));
current_comb.set_viewports(
0,
iter::once(hal::pso::Viewport {
rect,
depth: 0.0 .. 1.0,
}),
);
}
let context = RenderPassContext {
colors: color_attachments
.iter()
.map(|at| view_guard[at.attachment].format)
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.map(|resolve| view_guard[*resolve].format)
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format),
};
RenderPass::new(
current_comb,
Stored {
value: encoder_id,
ref_count: cmb.life_guard.add_ref(),
},
context,
trackers,
sample_count,
cmb.features.max_bind_groups,
)
};
hub.render_passes.register_identity(id_in, pass, &mut token)
}
}
impl<F: IdentityFilter<id::ComputePassId>> Global<F> {
pub fn command_encoder_begin_compute_pass<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
_desc: &ComputePassDescriptor,
id_in: F::Input,
) -> id::ComputePassId {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let raw = cmb.raw.pop().unwrap();
let trackers = mem::replace(&mut cmb.trackers, TrackerSet::new(encoder_id.backend()));
let stored = Stored {
value: encoder_id,
ref_count: cmb.life_guard.add_ref(),
};
let pass = ComputePass::new(raw, stored, trackers, cmb.features.max_bind_groups);
hub.compute_passes
.register_identity(id_in, pass, &mut token)
}
}

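The Box::into_raw in the two begin functions above pins down the C-side ownership story, and the Box::from_raw in the wgpu-native wrappers at the end of this commit completes it: a pass id is a heap-allocated recorder, and the matching end_pass call consumes and frees it. A sketch of the resulting contract (the comments are inferences from that pairing, not documented guarantees):

WGPUComputePassId pass = wgpu_command_encoder_begin_compute_pass(encoder, NULL);
// ... record commands ...
wgpu_compute_pass_end_pass(pass); // replays the recording and frees the allocation
// `pass` is dangling from here on; touching it again would be a use-after-free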
View File

@@ -5,7 +5,6 @@
use crate::{
command::{
bind::{Binder, LayoutChange},
-CommandBuffer,
PhantomSlice,
RawRenderTargets,
},
@@ -18,7 +17,7 @@ use crate::{
MAX_VERTEX_BUFFERS,
MAX_COLOR_TARGETS,
},
-hub::{GfxBackend, Global, IdentityFilter, Token},
+hub::{GfxBackend, Global, Token},
id,
pipeline::{IndexFormat, InputStepMode, PipelineFlags},
resource::{BufferUsage, TextureUsage, TextureViewInner},
@@ -282,114 +281,8 @@ impl State {
}
}
#[derive(Debug)]
pub struct RenderPass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<id::CommandBufferId>,
context: RenderPassContext,
binder: Binder,
trackers: TrackerSet,
blend_color_status: OptionalState,
stencil_reference_status: OptionalState,
index_state: IndexState,
vertex_state: VertexState,
sample_count: u8,
}
impl<B: GfxBackend> RenderPass<B> {
pub(crate) fn new(
raw: B::CommandBuffer,
cmb_id: Stored<id::CommandBufferId>,
context: RenderPassContext,
trackers: TrackerSet,
sample_count: u8,
max_bind_groups: u32,
) -> Self {
RenderPass {
raw,
cmb_id,
context,
binder: Binder::new(max_bind_groups),
trackers,
blend_color_status: OptionalState::Unused,
stencil_reference_status: OptionalState::Unused,
index_state: IndexState {
bound_buffer_view: None,
format: IndexFormat::Uint16,
limit: 0,
},
vertex_state: VertexState {
inputs: [VertexBufferState::EMPTY; MAX_VERTEX_BUFFERS],
vertex_limit: 0,
instance_limit: 0,
},
sample_count,
}
}
fn is_ready(&self) -> Result<(), DrawError> {
//TODO: vertex buffers
let bind_mask = self.binder.invalid_mask();
if bind_mask != 0 {
//let (expected, provided) = self.binder.entries[index as usize].info();
return Err(DrawError::IncompatibleBindGroup {
index: bind_mask.trailing_zeros() as u32,
});
}
if self.blend_color_status == OptionalState::Required {
return Err(DrawError::MissingBlendColor);
}
if self.stencil_reference_status == OptionalState::Required {
return Err(DrawError::MissingStencilReference);
}
Ok(())
}
}
// Common routines between render/compute
impl<F: IdentityFilter<id::RenderPassId>> Global<F> {
pub fn render_pass_end_pass<B: GfxBackend>(&self, pass_id: id::RenderPassId) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let (mut pass, mut token) = hub.render_passes.unregister(pass_id, &mut token);
unsafe {
pass.raw.end_render_pass();
}
pass.trackers.optimize();
log::debug!("Render pass {:?} {:#?}", pass_id, pass.trackers);
let cmb = &mut cmb_guard[pass.cmb_id.value];
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
match cmb.raw.last_mut() {
Some(last) => {
log::trace!("Encoding barriers before pass {:?}", pass_id);
CommandBuffer::insert_barriers(
last,
&mut cmb.trackers,
&pass.trackers,
&*buffer_guard,
&*texture_guard,
);
unsafe { last.finish() };
}
None => {
cmb.trackers.merge_extend(&pass.trackers);
}
}
if false {
log::debug!("Command buffer {:?} after render pass {:#?}",
pass.cmb_id.value, cmb.trackers);
}
cmb.raw.push(pass.raw);
}
}
impl<F> Global<F> {
pub fn command_encoder_run_render_pass<B: GfxBackend>(
&self,
@@ -1201,471 +1094,6 @@ impl<F> Global<F> {
}
}
}
pub fn render_pass_set_bind_group<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
index: u32,
bind_group_id: id::BindGroupId,
offsets: &[BufferAddress],
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group = pass
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
}
pass.trackers.merge_extend(&bind_group.used);
if let Some((pipeline_layout_id, follow_ups)) = pass
.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw())
.chain(follow_ups.clone().map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()));
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_ups.flat_map(|(_, offsets)| offsets))
.map(|&off| off as hal::command::DescriptorSetOffset),
);
}
};
}
// Render-specific routines
pub fn render_pass_set_index_buffer<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
buffer_id: id::BufferId,
offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDEX));
let range = offset .. buffer.size;
pass.index_state.bound_buffer_view = Some((buffer_id, range));
pass.index_state.update_limit();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset,
index_type: conv::map_index_format(pass.index_state.format),
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
pub fn render_pass_set_vertex_buffers<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
start_slot: u32,
buffers: &[id::BufferId],
offsets: &[BufferAddress],
) {
let hub = B::hub(self);
let mut token = Token::root();
assert_eq!(buffers.len(), offsets.len());
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
for (vbs, (&id, &offset)) in pass.vertex_state.inputs[start_slot as usize ..]
.iter_mut()
.zip(buffers.iter().zip(offsets))
{
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, id, (), BufferUsage::VERTEX)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::VERTEX));
vbs.total_size = buffer.size - offset;
}
pass.vertex_state.update_limits();
let buffers = buffers
.iter()
.map(|&id| &buffer_guard[id].raw)
.zip(offsets.iter().cloned());
unsafe {
pass.raw.bind_vertex_buffers(start_slot, buffers);
}
}
pub fn render_pass_draw<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
assert!(
first_vertex + vertex_count <= pass.vertex_state.vertex_limit,
"Vertex out of range!"
);
assert!(
first_instance + instance_count <= pass.vertex_state.instance_limit,
"Instance out of range!"
);
unsafe {
pass.raw.draw(
first_vertex .. first_vertex + vertex_count,
first_instance .. first_instance + instance_count,
);
}
}
pub fn render_pass_draw_indirect<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
indirect_buffer_id: id::BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
let buffer = pass
.trackers
.buffers
.use_extend(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
unsafe {
pass.raw.draw_indirect(&buffer.raw, indirect_offset, 1, 0);
}
}
pub fn render_pass_draw_indexed<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
//TODO: validate that base_vertex + max_index() is within the provided range
assert!(
first_index + index_count <= pass.index_state.limit,
"Index out of range!"
);
assert!(
first_instance + instance_count <= pass.vertex_state.instance_limit,
"Instance out of range!"
);
unsafe {
pass.raw.draw_indexed(
first_index .. first_index + index_count,
base_vertex,
first_instance .. first_instance + instance_count,
);
}
}
pub fn render_pass_draw_indexed_indirect<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
indirect_buffer_id: id::BufferId,
indirect_offset: BufferAddress,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
let buffer = pass
.trackers
.buffers
.use_extend(
&*buffer_guard,
indirect_buffer_id,
(),
BufferUsage::INDIRECT,
)
.unwrap();
assert!(buffer.usage.contains(BufferUsage::INDIRECT));
unsafe {
pass.raw
.draw_indexed_indirect(&buffer.raw, indirect_offset, 1, 0);
}
}
pub fn render_pass_set_pipeline<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
pipeline_id: id::RenderPipelineId,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
assert!(
pass.context.compatible(&pipeline.pass_context),
"The render pipeline is not compatible with the pass!"
);
assert_eq!(
pipeline.sample_count, pass.sample_count,
"The render pipeline and renderpass have mismatching sample_count"
);
pass.blend_color_status
.require(pipeline.flags.contains(PipelineFlags::BLEND_COLOR));
pass.stencil_reference_status
.require(pipeline.flags.contains(PipelineFlags::STENCIL_REFERENCE));
unsafe {
pass.raw.bind_graphics_pipeline(&pipeline.raw);
}
// Rebind resource
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
pass.binder.pipeline_layout_id = Some(pipeline.layout_id);
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
let mut is_compatible = true;
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().map(|offset| *offset as u32),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
}
}
}
// Rebind index buffer if the index format has changed with the pipeline switch
if pass.index_state.format != pipeline.index_format {
pass.index_state.format = pipeline.index_format;
pass.index_state.update_limit();
if let Some((buffer_id, ref range)) = pass.index_state.bound_buffer_view {
let (buffer_guard, _) = hub.buffers.read(&mut token);
let buffer = pass
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUsage::INDEX)
.unwrap();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset: range.start,
index_type: conv::map_index_format(pass.index_state.format),
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
}
// Update vertex buffer limits
for (vbs, &(stride, rate)) in pass
.vertex_state
.inputs
.iter_mut()
.zip(&pipeline.vertex_strides)
{
vbs.stride = stride;
vbs.rate = rate;
}
for vbs in pass.vertex_state.inputs[pipeline.vertex_strides.len() ..].iter_mut() {
vbs.stride = 0;
vbs.rate = InputStepMode::Vertex;
}
pass.vertex_state.update_limits();
}
pub fn render_pass_set_blend_color<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
color: &Color,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.blend_color_status = OptionalState::Set;
unsafe {
pass.raw.set_blend_constants(conv::map_color_f32(color));
}
}
pub fn render_pass_set_stencil_reference<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
value: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.stencil_reference_status = OptionalState::Set;
unsafe {
pass.raw.set_stencil_reference(hal::pso::Face::all(), value);
}
}
pub fn render_pass_set_viewport<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
x: f32,
y: f32,
w: f32,
h: f32,
min_depth: f32,
max_depth: f32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
use std::convert::TryFrom;
use std::i16;
pass.raw.set_viewports(
0,
&[hal::pso::Viewport {
rect: hal::pso::Rect {
x: i16::try_from(x.round() as i64).unwrap_or(0),
y: i16::try_from(y.round() as i64).unwrap_or(0),
w: i16::try_from(w.round() as i64).unwrap_or(i16::MAX),
h: i16::try_from(h.round() as i64).unwrap_or(i16::MAX),
},
depth: min_depth .. max_depth,
}],
);
}
}
pub fn render_pass_set_scissor_rect<B: GfxBackend>(
&self,
pass_id: id::RenderPassId,
x: u32,
y: u32,
w: u32,
h: u32,
) {
let hub = B::hub(self);
let mut token = Token::root();
let (mut pass_guard, _) = hub.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
use std::convert::TryFrom;
use std::i16;
pass.raw.set_scissors(
0,
&[hal::pso::Rect {
x: i16::try_from(x).unwrap_or(0),
y: i16::try_from(y).unwrap_or(0),
w: i16::try_from(w).unwrap_or(i16::MAX),
h: i16::try_from(h).unwrap_or(i16::MAX),
}],
);
}
}
}
mod ffi {

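The deleted draw entry points above enforced their range checks eagerly, e.g. first_vertex + vertex_count <= vertex_limit before issuing the draw. Under the RawPass model those checks can only run when end_pass replays the stream, so a malformed draw now surfaces at end_pass rather than at the call site. An assumed C mirror of the deleted indexed draw, following the parameter order of render_pass_draw_indexed above:

// assumed: void wgpu_render_pass_draw_indexed(WGPURawRenderPass *pass, uint32_t index_count,
//     uint32_t instance_count, uint32_t first_index, int32_t base_vertex, uint32_t first_instance);
wgpu_render_pass_draw_indexed(rpass, 36, 1, 0, 0, 0); // a cube's worth of indices, one instance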
View File

@@ -5,7 +5,7 @@
use crate::{
backend,
binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
-command::{CommandBuffer, ComputePass, RenderPass},
+command::CommandBuffer,
device::{Device, ShaderModule},
id::{
AdapterId,
@@ -13,11 +13,9 @@ use crate::{
BindGroupLayoutId,
BufferId,
CommandBufferId,
-ComputePassId,
ComputePipelineId,
DeviceId,
PipelineLayoutId,
-RenderPassId,
RenderPipelineId,
SamplerId,
ShaderModuleId,
@@ -181,18 +179,10 @@ impl<B: hal::Backend> Access<BindGroup<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for Root {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for Device<B> {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for SwapChain<B> {}
-impl<B: hal::Backend> Access<ComputePass<B>> for Root {}
-impl<B: hal::Backend> Access<ComputePass<B>> for BindGroup<B> {}
-impl<B: hal::Backend> Access<ComputePass<B>> for CommandBuffer<B> {}
-impl<B: hal::Backend> Access<RenderPass<B>> for Root {}
-impl<B: hal::Backend> Access<RenderPass<B>> for BindGroup<B> {}
-impl<B: hal::Backend> Access<RenderPass<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<ComputePipeline<B>> for Root {}
impl<B: hal::Backend> Access<ComputePipeline<B>> for BindGroup<B> {}
-impl<B: hal::Backend> Access<ComputePipeline<B>> for ComputePass<B> {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for Root {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for BindGroup<B> {}
-impl<B: hal::Backend> Access<RenderPipeline<B>> for RenderPass<B> {}
impl<B: hal::Backend> Access<ShaderModule<B>> for Root {}
impl<B: hal::Backend> Access<ShaderModule<B>> for PipelineLayout<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for Root {}
@@ -200,9 +190,7 @@ impl<B: hal::Backend> Access<Buffer<B>> for Device<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for BindGroupLayout<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for BindGroup<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for CommandBuffer<B> {}
-impl<B: hal::Backend> Access<Buffer<B>> for ComputePass<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for ComputePipeline<B> {}
-impl<B: hal::Backend> Access<Buffer<B>> for RenderPass<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for RenderPipeline<B> {}
impl<B: hal::Backend> Access<Texture<B>> for Root {}
impl<B: hal::Backend> Access<Texture<B>> for Device<B> {}
@@ -379,9 +367,7 @@ pub struct Hub<B: hal::Backend, F> {
pub bind_group_layouts: Registry<BindGroupLayout<B>, BindGroupLayoutId, F>,
pub bind_groups: Registry<BindGroup<B>, BindGroupId, F>,
pub command_buffers: Registry<CommandBuffer<B>, CommandBufferId, F>,
-pub render_passes: Registry<RenderPass<B>, RenderPassId, F>,
pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>,
-pub compute_passes: Registry<ComputePass<B>, ComputePassId, F>,
pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>,
pub buffers: Registry<Buffer<B>, BufferId, F>,
pub textures: Registry<Texture<B>, TextureId, F>,
@@ -400,9 +386,7 @@ impl<B: GfxBackend, F: Default> Default for Hub<B, F> {
bind_group_layouts: Registry::new(B::VARIANT),
bind_groups: Registry::new(B::VARIANT),
command_buffers: Registry::new(B::VARIANT),
-render_passes: Registry::new(B::VARIANT),
render_pipelines: Registry::new(B::VARIANT),
-compute_passes: Registry::new(B::VARIANT),
compute_pipelines: Registry::new(B::VARIANT),
buffers: Registry::new(B::VARIANT),
textures: Registry::new(B::VARIANT),
@@ -459,9 +443,7 @@ impl<B: hal::Backend, F> Drop for Hub<B, F> {
//TODO:
// self.compute_pipelines
-// self.compute_passes
// self.render_pipelines
-// self.render_passes
// self.bind_group_layouts
// self.pipeline_layouts
// self.shader_modules

View File

@@ -115,9 +115,9 @@ pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;
// Command
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type CommandEncoderId = CommandBufferId;
+pub type RenderPassId = *mut crate::command::RawRenderPass;
+pub type ComputePassId = *mut crate::command::RawPass;
pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
-pub type RenderPassId = Id<crate::command::RenderPass<Dummy>>;
-pub type ComputePassId = Id<crate::command::ComputePass<Dummy>>;
// Swap chain
pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>;

View File

@@ -77,17 +77,17 @@ pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
#[no_mangle]
-pub unsafe extern "C" fn wgpu_render_pass_end_pass(pass_id: core::command::RawRenderPassId) {
+pub unsafe extern "C" fn wgpu_render_pass_end_pass(pass_id: id::RenderPassId) {
let (pass_data, encoder_id, targets) = Box::from_raw(pass_id).finish_render();
let color_attachments: arrayvec::ArrayVec<[_; core::device::MAX_COLOR_TARGETS]> = targets.colors
.iter()
.flat_map(|at| {
-if at.attachment == core::id::TextureViewId::ERROR {
+if at.attachment == id::TextureViewId::ERROR {
None
} else {
Some(core::command::RenderPassColorAttachmentDescriptor {
attachment: at.attachment,
-resolve_target: if at.resolve_target == core::id::TextureViewId::ERROR {
+resolve_target: if at.resolve_target == id::TextureViewId::ERROR {
None
} else {
Some(&at.resolve_target)
@@ -99,7 +99,7 @@ pub unsafe extern "C" fn wgpu_render_pass_end_pass(pass_id: core::command::RawRe
}
})
.collect();
-let depth_stencil_attachment = if targets.depth_stencil.attachment == core::id::TextureViewId::ERROR {
+let depth_stencil_attachment = if targets.depth_stencil.attachment == id::TextureViewId::ERROR {
let depth_stencil_attachment = if targets.depth_stencil.attachment == id::TextureViewId::ERROR {
None
} else {
Some(&targets.depth_stencil)
@@ -108,7 +108,7 @@ pub unsafe extern "C" fn wgpu_render_pass_end_pass(pass_id: core::command::RawRe
}
#[no_mangle]
-pub unsafe extern "C" fn wgpu_compute_pass_end_pass(pass_id: core::command::RawComputePassId) {
+pub unsafe extern "C" fn wgpu_compute_pass_end_pass(pass_id: id::ComputePassId) {
let (pass_data, encoder_id) = Box::from_raw(pass_id).finish_compute();
gfx_select!(encoder_id => GLOBAL.command_encoder_run_compute_pass(encoder_id, &pass_data))
}