Mirror of https://github.com/gfx-rs/wgpu.git, synced 2024-11-22 14:55:05 +00:00
Merge #195
195: Fix tracking and improve draw call validation r=grovesNL a=kvark

Fixes #196

## Validation

Note: the most difficult bit is still missing - we need to know the maximum index in an index buffer. That is not meant to be done in this PR; the logic here is good enough on its own.

## Tracking

Implements a custom iterator with Drop. Also resets tracking of resources when a command buffer is done.

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
commit 77457e408d
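The "custom iterator with Drop" mentioned in the PR description refers to the ConsumeIterator added in the tracking code (visible in the diff below): it merges one tracker's states into another and yields usage transitions as it goes, so it must be drained even if the caller stops early. Below is a minimal, self-contained sketch of that pattern; the names (MergeIter, the plain u32 state type) are invented for illustration and are not part of wgpu's API.

```rust
// Hypothetical sketch (not wgpu API): a merging iterator whose side effects must
// complete even if the caller stops iterating early, hence the Drop impl that
// drains it. wgpu's ConsumeIterator in track.rs follows the same shape.
use std::collections::{hash_map, HashMap};
use std::ops::Range;

struct MergeIter<'a> {
    src: hash_map::Iter<'a, u32, u32>, // states to merge in
    dst: &'a mut HashMap<u32, u32>,    // tracker being updated as we iterate
}

impl<'a> Iterator for MergeIter<'a> {
    // Yields (key, old_state .. new_state) only where the state actually changes.
    type Item = (u32, Range<u32>);
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let (&key, &new) = self.src.next()?;
            match self.dst.insert(key, new) {
                None => {}                                 // first sighting: nothing to report
                Some(old) if old == new => {}              // unchanged: keep scanning
                Some(old) => return Some((key, old..new)), // report the transition
            }
        }
    }
}

impl<'a> Drop for MergeIter<'a> {
    // Finish the remaining merges so `dst` is fully updated no matter what.
    fn drop(&mut self) {
        self.for_each(drop);
    }
}

fn main() {
    let src: HashMap<u32, u32> = [(1, 10), (2, 20)].into_iter().collect();
    let mut dst: HashMap<u32, u32> = [(1, 5)].into_iter().collect();
    // Even if we only looked at the first transition, Drop would merge the rest.
    let transitions: Vec<_> = MergeIter { src: src.iter(), dst: &mut dst }.collect();
    println!("{:?}", transitions); // [(1, 5..10)]; key 2 is just inserted, no transition
    assert_eq!(dst.get(&2), Some(&20));
}
```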
@@ -1,6 +1,6 @@

/* Generated with cbindgen:0.8.7 */
/* Generated with cbindgen:0.8.6 */

#include <stdarg.h>
#include <stdbool.h>

@@ -1,6 +1,6 @@
#define WGPU_LOCAL

/* Generated with cbindgen:0.8.7 */
/* Generated with cbindgen:0.8.6 */

#include <stdarg.h>
#include <stdbool.h>

@@ -11,6 +11,8 @@

#define WGPUMAX_COLOR_TARGETS 4

#define WGPUMAX_VERTEX_BUFFERS 8

typedef enum {
WGPUAddressMode_ClampToEdge = 0,
WGPUAddressMode_Repeat = 1,
@@ -99,7 +99,8 @@ impl<B: hal::Backend> CommandAllocator<B> {
pool.available.pop().unwrap()
}

pub fn after_submit(&self, cmd_buf: CommandBuffer<B>, submit_index: SubmissionIndex) {
pub fn after_submit(&self, mut cmd_buf: CommandBuffer<B>, submit_index: SubmissionIndex) {
cmd_buf.trackers.clear();
cmd_buf
.life_guard
.submission_index
@@ -144,34 +144,33 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass.raw.bind_compute_pipeline(&pipeline.raw);
}

if pass.binder.pipeline_layout_id == Some(pipeline.layout_id.clone()) {
return;
}
// Rebind resources
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();

let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());

pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());

for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
if let LayoutChange::Match(bg_id) = entry.expect_layout(bgl_id) {
let desc_set = &bind_group_guard[bg_id].raw;
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
&[],
);
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
if let LayoutChange::Match(bg_id) = entry.expect_layout(bgl_id) {
let desc_set = &bind_group_guard[bg_id].raw;
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
&[],
);
}
}
}
}
@@ -19,7 +19,6 @@ use crate::{
RenderPassKey,
},
hub::{Storage, HUB},
pipeline::IndexFormat,
resource::TexturePlacement,
swap_chain::{SwapChainLink, SwapImageEpoch},
track::{DummyUsage, Stitch, TrackerSet},

@@ -393,11 +392,6 @@ pub fn command_encoder_begin_render_pass(
depth_stencil: depth_stencil_attachment.map(|at| view_guard[at.attachment].format),
};

let index_state = IndexState {
bound_buffer_view: None,
format: IndexFormat::Uint16,
};

RenderPass::new(
current_comb,
Stored {

@@ -405,7 +399,6 @@ pub fn command_encoder_begin_render_pass(
ref_count: cmb.life_guard.ref_count.clone(),
},
context,
index_state,
)
}
@@ -1,9 +1,9 @@
use crate::{
command::bind::{Binder, LayoutChange},
conv,
device::RenderPassContext,
device::{MAX_VERTEX_BUFFERS, RenderPassContext},
hub::HUB,
pipeline::{IndexFormat, PipelineFlags},
pipeline::{IndexFormat, InputStepMode, PipelineFlags},
resource::BufferUsage,
track::{Stitch, TrackerSet},
BindGroupId,

@@ -20,7 +20,7 @@ use crate::{

use hal::command::RawCommandBuffer;

use std::{iter, slice};
use std::{iter, ops::Range, slice};

#[derive(Debug, PartialEq)]
enum OptionalState {
@@ -50,8 +50,63 @@ enum DrawError {

#[derive(Debug)]
pub struct IndexState {
pub(crate) bound_buffer_view: Option<(BufferId, BufferAddress)>,
pub(crate) format: IndexFormat,
bound_buffer_view: Option<(BufferId, Range<BufferAddress>)>,
format: IndexFormat,
limit: u32,
}

impl IndexState {
fn update_limit(&mut self) {
self.limit = match self.bound_buffer_view {
Some((_, ref range)) => {
let shift = match self.format {
IndexFormat::Uint16 => 1,
IndexFormat::Uint32 => 2,
};
((range.end - range.start) >> shift) as u32
}
None => 0,
}
}
}

#[derive(Clone, Copy, Debug)]
pub struct VertexBufferState {
total_size: BufferAddress,
stride: BufferAddress,
rate: InputStepMode,
}

impl VertexBufferState {
const EMPTY: Self = VertexBufferState {
total_size: 0,
stride: 0,
rate: InputStepMode::Vertex,
};
}

#[derive(Debug)]
pub struct VertexState {
inputs: [VertexBufferState; MAX_VERTEX_BUFFERS],
vertex_limit: u32,
instance_limit: u32,
}

impl VertexState {
fn update_limits(&mut self) {
self.vertex_limit = !0;
self.instance_limit = !0;
for vbs in &self.inputs {
if vbs.stride == 0 {
continue
}
let limit = (vbs.total_size / vbs.stride) as u32;
match vbs.rate {
InputStepMode::Vertex => self.vertex_limit = self.vertex_limit.min(limit),
InputStepMode::Instance => self.instance_limit = self.instance_limit.min(limit),
}
}
}
}

pub struct RenderPass<B: hal::Backend> {
@@ -63,6 +118,7 @@ pub struct RenderPass<B: hal::Backend> {
blend_color_status: OptionalState,
stencil_reference_status: OptionalState,
index_state: IndexState,
vertex_state: VertexState,
}

impl<B: hal::Backend> RenderPass<B> {

@@ -70,7 +126,6 @@ impl<B: hal::Backend> RenderPass<B> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
context: RenderPassContext,
index_state: IndexState,
) -> Self {
RenderPass {
raw,

@@ -80,7 +135,16 @@ impl<B: hal::Backend> RenderPass<B> {
trackers: TrackerSet::new(),
blend_color_status: OptionalState::Unused,
stencil_reference_status: OptionalState::Unused,
index_state,
index_state: IndexState {
bound_buffer_view: None,
format: IndexFormat::Uint16,
limit: 0,
},
vertex_state: VertexState {
inputs: [VertexBufferState::EMPTY; MAX_VERTEX_BUFFERS],
vertex_limit: 0,
instance_limit: 0,
},
}
}
@@ -208,6 +272,10 @@ pub extern "C" fn wgpu_render_pass_set_index_buffer(
.get_with_extended_usage(&*buffer_guard, buffer_id, BufferUsage::INDEX)
.unwrap();

let range = offset .. buffer.size;
pass.index_state.bound_buffer_view = Some((buffer_id, range));
pass.index_state.update_limit();

let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset,

@@ -217,8 +285,6 @@ pub extern "C" fn wgpu_render_pass_set_index_buffer(
unsafe {
pass.raw.bind_index_buffer(view);
}

pass.index_state.bound_buffer_view = Some((buffer_id, offset));
}

#[no_mangle]
@@ -234,12 +300,18 @@ pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
let offsets = unsafe { slice::from_raw_parts(offset_ptr, count) };

let pass = &mut pass_guard[pass_id];
for &id in buffers {
pass.trackers
for (vbs, (&id, &offset)) in pass.vertex_state.inputs.iter_mut().zip(buffers.iter().zip(offsets)) {
let buffer = pass.trackers
.buffers
.get_with_extended_usage(&*buffer_guard, id, BufferUsage::VERTEX)
.unwrap();
vbs.total_size = buffer.size - offset;
}
for vbs in pass.vertex_state.inputs[count..].iter_mut() {
vbs.total_size = 0;
}

pass.vertex_state.update_limits();

let buffers = buffers
.iter()
@@ -263,6 +335,9 @@ pub extern "C" fn wgpu_render_pass_draw(
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();

assert!(first_vertex + vertex_count <= pass.vertex_state.vertex_limit, "Vertex out of range!");
assert!(first_instance + instance_count <= pass.vertex_state.instance_limit, "Instance out of range!");

unsafe {
pass.raw.draw(
first_vertex .. first_vertex + vertex_count,
@@ -284,6 +359,10 @@ pub extern "C" fn wgpu_render_pass_draw_indexed(
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();

//TODO: validate that base_vertex + max_index() is within the provided range
assert!(first_index + index_count <= pass.index_state.limit, "Index out of range!");
assert!(first_instance + instance_count <= pass.vertex_state.instance_limit, "Instance out of range!");

unsafe {
pass.raw.draw_indexed(
first_index .. first_index + index_count,
@@ -317,34 +396,33 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
pass.raw.bind_graphics_pipeline(&pipeline.raw);
}

if pass.binder.pipeline_layout_id == Some(pipeline.layout_id.clone()) {
return;
}
// Rebind resource
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();

let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());

pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());

for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
if let LayoutChange::Match(bg_id) = entry.expect_layout(bgl_id) {
let desc_set = &bind_group_guard[bg_id].raw;
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
&[],
);
for (index, (entry, &bgl_id)) in pass
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
if let LayoutChange::Match(bg_id) = entry.expect_layout(bgl_id) {
let desc_set = &bind_group_guard[bg_id].raw;
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
&[],
);
}
}
}
}
@@ -352,8 +430,9 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
// Rebind index buffer if the index format has changed with the pipeline switch
if pass.index_state.format != pipeline.index_format {
pass.index_state.format = pipeline.index_format;
pass.index_state.update_limit();

if let Some((buffer_id, offset)) = pass.index_state.bound_buffer_view {
if let Some((buffer_id, ref range)) = pass.index_state.bound_buffer_view {
let buffer_guard = HUB.buffers.read();
let buffer = pass
.trackers

@@ -363,7 +442,7 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(

let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset,
offset: range.start,
index_type: conv::map_index_format(pass.index_state.format),
};

@@ -372,6 +451,16 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
}
}
}
// Update vertex buffer limits
for (vbs, &(stride, rate)) in pass.vertex_state.inputs.iter_mut().zip(&pipeline.vertex_strides) {
vbs.stride = stride;
vbs.rate = rate;
}
for vbs in pass.vertex_state.inputs[pipeline.vertex_strides.len() .. ].iter_mut() {
vbs.stride = 0;
vbs.rate = InputStepMode::Vertex;
}
pass.vertex_state.update_limits();
}

#[no_mangle]
@@ -58,6 +58,7 @@ use std::{collections::hash_map::Entry, ffi, iter, ptr, slice, sync::atomic::Ord

const CLEANUP_WAIT_MS: u64 = 5000;
pub const MAX_COLOR_TARGETS: usize = 4;
pub const MAX_VERTEX_BUFFERS: usize = 8;

pub fn all_buffer_stages() -> hal::pso::PipelineStage {
use hal::pso::PipelineStage as Ps;

@@ -241,6 +242,9 @@ impl PendingResources<back::Backend> {
);
let (life_guard, resource) = match resource_id {
ResourceId::Buffer(id) => {
if HUB.buffers.read()[id].pending_map_operation.is_some() {
continue
}
trackers.buffers.remove(id);
let buf = HUB.buffers.unregister(id);
(buf.life_guard, NativeResource::Buffer(buf.raw, buf.memory))

@@ -344,14 +348,14 @@ impl PendingResources<back::Backend> {
.drain(..)
.map(|buffer_id| {
let mut buffer_guard = HUB.buffers.write();
let mut buffer = &mut buffer_guard[buffer_id];
let buffer = &mut buffer_guard[buffer_id];
let operation = buffer.pending_map_operation.take().unwrap();
let result = match operation {
BufferMapOperation::Read(ref range, ..) => {
map_buffer(raw, limits, &mut buffer, range, HostMap::Read)
map_buffer(raw, limits, buffer, range, HostMap::Read)
}
BufferMapOperation::Write(ref range, ..) => {
map_buffer(raw, limits, &mut buffer, range, HostMap::Write)
map_buffer(raw, limits, buffer, range, HostMap::Write)
}
};
(operation, result)

@@ -602,6 +606,7 @@ pub fn device_create_buffer(
},
memory_properties,
memory,
size: desc.size,
mapped_write_ranges: Vec::new(),
pending_map_operation: None,
life_guard: LifeGuard::new(),
@@ -1050,23 +1055,17 @@ pub fn device_create_bind_group(
for (b, decl) in bindings.iter().zip(&bind_group_layout.bindings) {
let descriptor = match b.resource {
binding_model::BindingResource::Buffer(ref bb) => {
let buffer = used
.buffers
.get_with_extended_usage(
&*buffer_guard,
bb.buffer,
resource::BufferUsage::UNIFORM,
)
.unwrap();
let alignment = match decl.ty {
let (alignment, usage) = match decl.ty {
binding_model::BindingType::UniformBuffer
| binding_model::BindingType::UniformBufferDynamic => {
device.limits.min_uniform_buffer_offset_alignment
}
| binding_model::BindingType::UniformBufferDynamic => (
device.limits.min_uniform_buffer_offset_alignment,
resource::BufferUsage::UNIFORM,
),
binding_model::BindingType::StorageBuffer
| binding_model::BindingType::StorageBufferDynamic => {
device.limits.min_storage_buffer_offset_alignment
}
| binding_model::BindingType::StorageBufferDynamic => (
device.limits.min_storage_buffer_offset_alignment,
resource::BufferUsage::STORAGE,
),
binding_model::BindingType::Sampler
| binding_model::BindingType::SampledTexture
| binding_model::BindingType::StorageTexture => {

@@ -1079,6 +1078,10 @@ pub fn device_create_bind_group(
"Misaligned buffer offset {}",
bb.offset
);
let buffer = used
.buffers
.get_with_extended_usage(&*buffer_guard, bb.buffer, usage)
.unwrap();
let range = Some(bb.offset) .. Some(bb.offset + bb.size);
hal::pso::Descriptor::Buffer(&buffer.raw, range)
}

@@ -1088,18 +1091,28 @@ pub fn device_create_bind_group(
hal::pso::Descriptor::Sampler(&sampler.raw)
}
binding_model::BindingResource::TextureView(id) => {
assert_eq!(decl.ty, binding_model::BindingType::SampledTexture);
let (usage, image_layout) = match decl.ty {
binding_model::BindingType::SampledTexture => (
resource::TextureUsage::SAMPLED,
hal::image::Layout::ShaderReadOnlyOptimal,
),
binding_model::BindingType::StorageTexture => (
resource::TextureUsage::STORAGE,
hal::image::Layout::General,
),
_ => panic!("Missmatched texture binding for {:?}", decl),
};
let view = &texture_view_guard[id];
used.views.query(id, &view.life_guard.ref_count, DummyUsage);
used.textures
.transit(
view.texture_id.value,
&view.texture_id.ref_count,
resource::TextureUsage::SAMPLED,
usage,
TrackPermit::EXTEND,
)
.unwrap();
hal::pso::Descriptor::Image(&view.raw, hal::image::Layout::ShaderReadOnlyOptimal)
hal::pso::Descriptor::Image(&view.raw, image_layout)
}
};
writes.alloc().init(hal::pso::DescriptorSetWrite {
@@ -1445,9 +1458,11 @@ pub fn device_create_render_pipeline(
desc.vertex_input.vertex_buffers_count,
)
};
let mut vertex_strides = Vec::with_capacity(desc_vbs.len());
let mut vertex_buffers = Vec::with_capacity(desc_vbs.len());
let mut attributes = Vec::new();
for (i, vb_state) in desc_vbs.iter().enumerate() {
vertex_strides.alloc().init((vb_state.stride, vb_state.step_mode));
if vb_state.attributes_count == 0 {
continue;
}

@@ -1558,6 +1573,7 @@ pub fn device_create_render_pipeline(
pass_context,
flags,
index_format: desc.vertex_input.index_format,
vertex_strides,
}
}
@@ -293,4 +293,5 @@ pub struct RenderPipeline<B: hal::Backend> {
pub(crate) pass_context: RenderPassContext,
pub(crate) flags: PipelineFlags,
pub(crate) index_format: IndexFormat,
pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>,
}
@@ -62,6 +62,7 @@ pub struct Buffer<B: hal::Backend> {
pub(crate) device_id: Stored<DeviceId>,
pub(crate) memory_properties: hal::memory::Properties,
pub(crate) memory: B::Memory,
pub(crate) size: BufferAddress,
pub(crate) mapped_write_ranges: Vec<std::ops::Range<u64>>,
pub(crate) pending_map_operation: Option<BufferMapOperation>,
pub(crate) life_guard: LifeGuard,
@@ -15,7 +15,7 @@ use hal::backend::FastHashMap;

use std::{
borrow::Borrow,
collections::hash_map::Entry,
collections::hash_map::{Entry, Iter},
marker::PhantomData,
mem,
ops::{BitOr, Range},
@@ -112,6 +112,46 @@ pub enum Stitch {
Last,
}

//TODO: consider rewriting this without any iterators that have side effects.
pub struct ConsumeIterator<'a, I: TypedId, U: Copy + PartialEq> {
src: Iter<'a, Index, Track<U>>,
dst: &'a mut FastHashMap<Index, Track<U>>,
stitch: Stitch,
_marker: PhantomData<I>,
}

impl<'a, I: TypedId, U: Copy + PartialEq> Iterator for ConsumeIterator<'a, I, U> {
type Item = (I, Range<U>);
fn next(&mut self) -> Option<Self::Item> {
loop {
let (&index, new) = self.src.next()?;
match self.dst.entry(index) {
Entry::Vacant(e) => {
e.insert(new.clone());
}
Entry::Occupied(mut e) => {
assert_eq!(e.get().epoch, new.epoch);
let old = mem::replace(&mut e.get_mut().last, new.last);
if old != new.init {
let state = match self.stitch {
Stitch::Init => new.init,
Stitch::Last => new.last,
};
return Some((I::new(index, new.epoch), old .. state))
}
}
}
}
}
}

// Make sure to finish all side effects on drop
impl<'a, I: TypedId, U: Copy + PartialEq> Drop for ConsumeIterator<'a, I, U> {
fn drop(&mut self) {
self.for_each(drop)
}
}
pub struct TrackerSet {
pub buffers: BufferTracker,
pub textures: TextureTracker,

@@ -128,6 +168,12 @@ impl TrackerSet {
}
}

pub fn clear(&mut self) {
self.buffers.clear();
self.textures.clear();
self.views.clear();
}

pub fn consume_by_extend(&mut self, other: &Self) {
self.buffers.consume_by_extend(&other.buffers).unwrap();
self.textures.consume_by_extend(&other.textures).unwrap();

@@ -221,29 +267,13 @@ impl<I: TypedId, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker
&'a mut self,
other: &'a Self,
stitch: Stitch,
) -> impl 'a + Iterator<Item = (I, Range<U>)> {
other
.map
.iter()
.flat_map(move |(&index, new)| match self.map.entry(index) {
Entry::Vacant(e) => {
e.insert(new.clone());
None
}
Entry::Occupied(mut e) => {
assert_eq!(e.get().epoch, new.epoch);
let old = mem::replace(&mut e.get_mut().last, new.last);
if old == new.init {
None
} else {
let state = match stitch {
Stitch::Init => new.init,
Stitch::Last => new.last,
};
Some((I::new(index, new.epoch), old .. state))
}
}
})
) -> ConsumeIterator<'a, I, U> {
ConsumeIterator {
src: other.map.iter(),
dst: &mut self.map,
stitch,
_marker: PhantomData,
}
}

/// Consume another tacker, adding it's transitions to `self`.

@@ -280,6 +310,10 @@ impl<I: TypedId, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker
}

impl<I: TypedId + Copy, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<I, U> {
fn clear(&mut self) {
self.map.clear();
}

fn _get_with_usage<'a, T: 'a + Borrow<RefCount>>(
&mut self,
storage: &'a Storage<T, I>,