857: Separate valid internal IDs from external ones r=cwfitzgerald a=kvark

**Connections**
Closes #638 
wgpu-rs update - https://github.com/gfx-rs/wgpu-rs/pull/494

**Description**
The core change here is to allow user-specified IDs to be in the "Error" state that was introduced in #776.

This is done by defining an internal `Valid<I>` wrapper. Now the hub storages can be indexed only by this valid wrapper. For regular IDs, we have to go through `storage.get(index)`, which returns a `Result`. It still panics if the ID is total garbage, though; we don't want to handle what can't legitimately occur here.
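A minimal, self-contained sketch of that pattern (illustrative only; the real wgpu-core storage is keyed by typed IDs with epochs, not plain `usize`):

```rust
use std::ops::Index;

/// Proof that an ID has been checked against the storage.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Valid<I>(pub I);

#[derive(Clone, Debug)]
pub struct InvalidId;

enum Element<T> {
    /// Slot never registered: using it is a caller bug.
    Vacant,
    /// A live resource.
    Occupied(T),
    /// An ID that was handed out, but whose creation failed.
    Error,
}

pub struct Storage<T> {
    map: Vec<Element<T>>,
}

impl<T> Storage<T> {
    /// Checked access for externally supplied IDs: an "Error" ID yields
    /// `Err`, while a completely bogus index still panics.
    pub fn get(&self, id: usize) -> Result<&T, InvalidId> {
        match self.map[id] {
            Element::Occupied(ref v) => Ok(v),
            Element::Error => Err(InvalidId),
            Element::Vacant => panic!("ID not registered"),
        }
    }
}

/// Direct indexing is only possible once an ID has been validated.
impl<T> Index<Valid<usize>> for Storage<T> {
    type Output = T;
    fn index(&self, id: Valid<usize>) -> &T {
        self.get(id.0).unwrap()
    }
}
```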

All the other changes come mostly as a consequence of that:
  - new "Invalid*" error variants are added
  - the error types have undergone major refactoring (see the sketch after this list)
  - new `command/draw.rs` module for stuff shared between render passes and bundles
  - functions to generate error IDs for all the types
  - various renames, e.g. `comb` -> `cmd_buf`
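A condensed illustration of the error refactoring (shapes taken from the diffs below; `create_bind_group` here is a hypothetical stand-in, not the real entry point). Shared failures live in `DeviceError` and are nested into the per-operation enums through `thiserror`'s `#[from]`:

```rust
use thiserror::Error;

#[derive(Clone, Debug, Error)]
pub enum DeviceError {
    #[error("parent device is invalid")]
    Invalid,
    #[error("not enough memory left")]
    OutOfMemory,
}

#[derive(Clone, Debug, Error)]
pub enum CreateBindGroupError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("bind group layout is invalid")]
    InvalidLayout,
}

fn create_bind_group(out_of_memory: bool) -> Result<(), CreateBindGroupError> {
    if out_of_memory {
        // `?` converts through the generated `From` impl, the same
        // `Err(...)?` idiom the new code uses.
        Err(DeviceError::OutOfMemory)?
    }
    Ok(())
}
```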

The expected use by wgpu-rs is unchanged. So far, I don't think we'd be generating Error IDs, but we can always reconsider.
For browsers though, if `device_create_xxx` fails, they would generate an error ID. It occupies the slot until the corresponding JS object is dropped.
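A sketch of that flow, extending the storage sketch above (`Buffer`, `create_buffer_for_js`, and the failure condition are all hypothetical):

```rust
impl<T> Storage<T> {
    fn set(&mut self, id: usize, element: Element<T>) {
        while self.map.len() <= id {
            self.map.push(Element::Vacant);
        }
        self.map[id] = element;
    }
}

struct Buffer;

fn create_buffer_for_js(storage: &mut Storage<Buffer>, id: usize, size: u64) {
    if size == 0 {
        // Creation failed: park an Error marker in the slot so later
        // commands referencing this ID fail validation instead of
        // panicking. The slot is reclaimed (back to Vacant) only when
        // the corresponding JS object is dropped.
        storage.set(id, Element::Error);
    } else {
        storage.set(id, Element::Occupied(Buffer));
    }
}
```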

**Testing**
Tested on wgpu-rs examples

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
Committed by bors[bot] on 2020-08-04 20:51:11 +00:00 (commit 78546f410d)
27 changed files with 1745 additions and 1186 deletions


@ -67,7 +67,7 @@ fn main() {
Some(trace::Action::Init { desc, backend }) => {
log::info!("Initializing the device for backend: {:?}", backend);
let adapter = global
.pick_adapter(
.request_adapter(
&wgc::instance::RequestAdapterOptions {
power_preference: wgt::PowerPreference::Default,
#[cfg(feature = "winit")]
@ -82,7 +82,7 @@ fn main() {
)
.expect("Unable to find an adapter for selected backend");
let info = gfx_select!(adapter => global.adapter_get_info(adapter));
let info = gfx_select!(adapter => global.adapter_get_info(adapter)).unwrap();
log::info!("Picked '{}'", info.name);
gfx_select!(adapter => global.adapter_request_device(
adapter,


@ -155,21 +155,21 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
}
A::CreateBuffer(id, desc) => {
let label = Label::new(&desc.label);
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_buffer::<B>(device, &desc.map_label(|_| label.as_ptr()), id)
.unwrap();
}
A::DestroyBuffer(id) => {
self.buffer_destroy::<B>(id, true);
self.buffer_drop::<B>(id, true);
}
A::CreateTexture(id, desc) => {
let label = Label::new(&desc.label);
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_texture::<B>(device, &desc.map_label(|_| label.as_ptr()), id)
.unwrap();
}
A::DestroyTexture(id) => {
self.texture_destroy::<B>(id);
self.texture_drop::<B>(id);
}
A::CreateTextureView {
id,
@ -177,7 +177,7 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
desc,
} => {
let label = desc.as_ref().map_or(Label(None), |d| Label::new(&d.label));
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.texture_create_view::<B>(
parent_id,
desc.map(|d| d.map_label(|_| label.as_ptr())).as_ref(),
@ -186,16 +186,16 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
.unwrap();
}
A::DestroyTextureView(id) => {
self.texture_view_destroy::<B>(id).unwrap();
self.texture_view_drop::<B>(id).unwrap();
}
A::CreateSampler(id, desc) => {
let label = Label::new(&desc.label);
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_sampler::<B>(device, &desc.map_label(|_| label.as_ptr()), id)
.unwrap();
}
A::DestroySampler(id) => {
self.sampler_destroy::<B>(id);
self.sampler_drop::<B>(id);
}
A::GetSwapChainTexture { id, parent_id } => {
if let Some(id) = id {
@ -210,23 +210,23 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
.unwrap();
}
A::DestroyBindGroupLayout(id) => {
self.bind_group_layout_destroy::<B>(id);
self.bind_group_layout_drop::<B>(id);
}
A::CreatePipelineLayout(id, desc) => {
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_pipeline_layout::<B>(device, &desc, id)
.unwrap();
}
A::DestroyPipelineLayout(id) => {
self.pipeline_layout_destroy::<B>(id);
self.pipeline_layout_drop::<B>(id);
}
A::CreateBindGroup(id, desc) => {
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_bind_group::<B>(device, &desc, id)
.unwrap();
}
A::DestroyBindGroup(id) => {
self.bind_group_destroy::<B>(id);
self.bind_group_drop::<B>(id);
}
A::CreateShaderModule { id, data } => {
let source = if data.ends_with(".wgsl") {
@ -244,23 +244,23 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
.unwrap();
}
A::DestroyShaderModule(id) => {
self.shader_module_destroy::<B>(id);
self.shader_module_drop::<B>(id);
}
A::CreateComputePipeline(id, desc) => {
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_compute_pipeline::<B>(device, &desc, id)
.unwrap();
}
A::DestroyComputePipeline(id) => {
self.compute_pipeline_destroy::<B>(id);
self.compute_pipeline_drop::<B>(id);
}
A::CreateRenderPipeline(id, desc) => {
self.device_maintain_ids::<B>(device);
self.device_maintain_ids::<B>(device).unwrap();
self.device_create_render_pipeline::<B>(device, &desc, id)
.unwrap();
}
A::DestroyRenderPipeline(id) => {
self.render_pipeline_destroy::<B>(id);
self.render_pipeline_drop::<B>(id);
}
A::CreateRenderBundle { id, desc, base } => {
let label = Label::new(&desc.label.as_ref().unwrap());
@ -276,7 +276,7 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
.unwrap();
}
A::DestroyRenderBundle(id) => {
self.render_bundle_destroy::<B>(id);
self.render_bundle_drop::<B>(id);
}
A::WriteBuffer {
id,


@ -141,7 +141,7 @@ impl Corpus {
if !corpus.backends.contains(backend.into()) {
continue;
}
let adapter = match global.pick_adapter(
let adapter = match global.request_adapter(
&wgc::instance::RequestAdapterOptions {
power_preference: wgt::PowerPreference::Default,
compatible_surface: None,
@ -151,12 +151,13 @@ impl Corpus {
|id| id.backend(),
),
) {
Some(adapter) => adapter,
None => continue,
Ok(adapter) => adapter,
Err(_) => continue,
};
println!("\tBackend {:?}", backend);
let supported_features = gfx_select!(adapter => global.adapter_features(adapter));
let supported_features =
gfx_select!(adapter => global.adapter_features(adapter)).unwrap();
for test_path in &corpus.tests {
println!("\t\tTest '{:?}'", test_path);
let test = Test::load(dir.join(test_path), adapter.backend());


@ -3,8 +3,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
device::SHADER_STAGE_COUNT,
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId},
device::{DeviceError, SHADER_STAGE_COUNT},
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid},
track::{TrackerSet, DUMMY_SELECTOR},
validation::{MissingBufferUsageError, MissingTextureUsageError},
FastHashMap, LifeGuard, MultiRefCount, RefCount, Stored, MAX_BIND_GROUPS,
@ -27,14 +27,14 @@ use thiserror::Error;
#[derive(Clone, Debug, Error)]
pub enum CreateBindGroupLayoutError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("arrays of bindings unsupported for this type of binding")]
ArrayUnsupported,
#[error("conflicting binding at index {0}")]
ConflictBinding(u32),
#[error("required device feature is missing: {0:?}")]
MissingFeature(wgt::Features),
#[error("not enough memory left")]
OutOfMemory,
#[error(transparent)]
TooManyBindings(BindingTypeMaxCountError),
#[error("arrays of bindings can't be 0 elements long")]
@ -43,6 +43,16 @@ pub enum CreateBindGroupLayoutError {
#[derive(Clone, Debug, Error)]
pub enum CreateBindGroupError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("bind group layout is invalid")]
InvalidLayout,
#[error("buffer {0:?} is invalid")]
InvalidBuffer(BufferId),
#[error("texture view {0:?} is invalid")]
InvalidTextureView(TextureViewId),
#[error("sampler {0:?} is invalid")]
InvalidSampler(SamplerId),
#[error("binding count declared with {expected} items, but {actual} items were provided")]
BindingArrayLengthMismatch { actual: usize, expected: usize },
#[error("bound buffer range {range:?} does not fit in buffer of size {size}")]
@ -264,6 +274,10 @@ pub struct BindGroupLayout<B: hal::Backend> {
#[derive(Clone, Debug, Error)]
pub enum CreatePipelineLayoutError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("bind group layout {0:?} is invalid")]
InvalidBindGroupLayout(BindGroupLayoutId),
#[error(
"push constant at index {index} has range bound {bound} not aligned to {}",
wgt::PUSH_CONSTANT_ALIGNMENT
@ -277,8 +291,6 @@ pub enum CreatePipelineLayoutError {
provided: wgt::ShaderStage,
intersected: wgt::ShaderStage,
},
#[error("not enough memory left")]
OutOfMemory,
#[error("push constant at index {index} has range {}..{} which exceeds device push constant size limit 0..{max}", range.start, range.end)]
PushConstantRangeTooLarge {
index: usize,
@ -326,7 +338,7 @@ pub struct PipelineLayout<B: hal::Backend> {
pub(crate) raw: B::PipelineLayout,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
pub(crate) bind_group_layout_ids: ArrayVec<[BindGroupLayoutId; MAX_BIND_GROUPS]>,
pub(crate) bind_group_layout_ids: ArrayVec<[Valid<BindGroupLayoutId>; MAX_BIND_GROUPS]>,
pub(crate) push_constant_ranges: ArrayVec<[wgt::PushConstantRange; SHADER_STAGE_COUNT]>,
}
@ -459,7 +471,7 @@ pub struct BindGroupDynamicBindingData {
pub struct BindGroup<B: hal::Backend> {
pub(crate) raw: DescriptorSet<B>,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) layout_id: BindGroupLayoutId,
pub(crate) layout_id: Valid<BindGroupLayoutId>,
pub(crate) life_guard: LifeGuard,
pub(crate) used: TrackerSet,
pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>,


@ -4,8 +4,8 @@
use super::CommandBuffer;
use crate::{
hub::GfxBackend, id::DeviceId, track::TrackerSet, FastHashMap, PrivateFeatures, Stored,
SubmissionIndex,
device::DeviceError, hub::GfxBackend, id::DeviceId, track::TrackerSet, FastHashMap,
PrivateFeatures, Stored, SubmissionIndex,
};
use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _};
@ -97,7 +97,7 @@ impl<B: GfxBackend> CommandAllocator<B> {
self.queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(CommandAllocatorError::OutOfMemory))?
.or(Err(DeviceError::OutOfMemory))?
};
let pool = CommandPool {
raw,
@ -147,7 +147,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(CommandAllocatorError::OutOfMemory))?
.or(Err(DeviceError::OutOfMemory))?
},
total: 0,
available: Vec::new(),
@ -256,6 +256,6 @@ impl<B: hal::Backend> CommandAllocator<B> {
#[derive(Clone, Debug, Error)]
pub enum CommandAllocatorError {
#[error("not enough memory left")]
OutOfMemory,
#[error(transparent)]
Device(#[from] DeviceError),
}


@ -6,7 +6,7 @@ use crate::{
binding_model::{BindGroup, PipelineLayout},
device::SHADER_STAGE_COUNT,
hub::{GfxBackend, Storage},
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId},
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid},
Stored, MAX_BIND_GROUPS,
};
@ -17,15 +17,15 @@ use wgt::DynamicOffset;
type BindGroupMask = u8;
#[derive(Clone, Debug)]
pub struct BindGroupPair {
layout_id: BindGroupLayoutId,
pub(super) struct BindGroupPair {
layout_id: Valid<BindGroupLayoutId>,
group_id: Stored<BindGroupId>,
}
#[derive(Debug)]
pub enum LayoutChange<'a> {
pub(super) enum LayoutChange<'a> {
Unchanged,
Match(BindGroupId, &'a [DynamicOffset]),
Match(Valid<BindGroupId>, &'a [DynamicOffset]),
Mismatch,
}
@ -36,11 +36,11 @@ pub enum Provision {
}
#[derive(Clone)]
pub struct FollowUpIter<'a> {
pub(super) struct FollowUpIter<'a> {
iter: slice::Iter<'a, BindGroupEntry>,
}
impl<'a> Iterator for FollowUpIter<'a> {
type Item = (BindGroupId, &'a [DynamicOffset]);
type Item = (Valid<BindGroupId>, &'a [DynamicOffset]);
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
@ -49,8 +49,8 @@ impl<'a> Iterator for FollowUpIter<'a> {
}
#[derive(Clone, Default, Debug)]
pub struct BindGroupEntry {
expected_layout_id: Option<BindGroupLayoutId>,
pub(super) struct BindGroupEntry {
expected_layout_id: Option<Valid<BindGroupLayoutId>>,
provided: Option<BindGroupPair>,
dynamic_offsets: Vec<DynamicOffset>,
}
@ -58,11 +58,11 @@ pub struct BindGroupEntry {
impl BindGroupEntry {
fn provide<B: GfxBackend>(
&mut self,
bind_group_id: BindGroupId,
bind_group_id: Valid<BindGroupId>,
bind_group: &BindGroup<B>,
offsets: &[DynamicOffset],
) -> Provision {
debug_assert_eq!(B::VARIANT, bind_group_id.backend());
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
let was_compatible = match self.provided {
Some(BindGroupPair {
@ -91,7 +91,10 @@ impl BindGroupEntry {
Provision::Changed { was_compatible }
}
pub fn expect_layout(&mut self, bind_group_layout_id: BindGroupLayoutId) -> LayoutChange {
pub fn expect_layout(
&mut self,
bind_group_layout_id: Valid<BindGroupLayoutId>,
) -> LayoutChange {
let some = Some(bind_group_layout_id);
if self.expected_layout_id != some {
self.expected_layout_id = some;
@ -117,7 +120,7 @@ impl BindGroupEntry {
}
}
fn actual_value(&self) -> Option<BindGroupId> {
fn actual_value(&self) -> Option<Valid<BindGroupId>> {
self.expected_layout_id.and_then(|layout_id| {
self.provided.as_ref().and_then(|pair| {
if pair.layout_id == layout_id {
@ -132,12 +135,12 @@ impl BindGroupEntry {
#[derive(Debug)]
pub struct Binder {
pub(crate) pipeline_layout_id: Option<PipelineLayoutId>, //TODO: strongly `Stored`
pub(crate) entries: ArrayVec<[BindGroupEntry; MAX_BIND_GROUPS]>,
pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored`
pub(super) entries: ArrayVec<[BindGroupEntry; MAX_BIND_GROUPS]>,
}
impl Binder {
pub(crate) fn new(max_bind_groups: u32) -> Self {
pub(super) fn new(max_bind_groups: u32) -> Self {
Self {
pipeline_layout_id: None,
entries: (0..max_bind_groups)
@ -146,15 +149,15 @@ impl Binder {
}
}
pub(crate) fn reset(&mut self) {
pub(super) fn reset(&mut self) {
self.pipeline_layout_id = None;
self.entries.clear();
}
pub(crate) fn change_pipeline_layout<B: GfxBackend>(
pub(super) fn change_pipeline_layout<B: GfxBackend>(
&mut self,
guard: &Storage<PipelineLayout<B>, PipelineLayoutId>,
new_id: PipelineLayoutId,
new_id: Valid<PipelineLayoutId>,
) {
let old_id_opt = self.pipeline_layout_id.replace(new_id);
let new = &guard[new_id];
@ -180,15 +183,15 @@ impl Binder {
/// (i.e. compatible with current expectations). Also returns an iterator
/// of bind group IDs to be bound with it: those are compatible bind groups
/// that were previously blocked because the current one was incompatible.
pub(crate) fn provide_entry<'a, B: GfxBackend>(
pub(super) fn provide_entry<'a, B: GfxBackend>(
&'a mut self,
index: usize,
bind_group_id: BindGroupId,
bind_group_id: Valid<BindGroupId>,
bind_group: &BindGroup<B>,
offsets: &[DynamicOffset],
) -> Option<(PipelineLayoutId, FollowUpIter<'a>)> {
) -> Option<(Valid<PipelineLayoutId>, FollowUpIter<'a>)> {
tracing::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
debug_assert_eq!(B::VARIANT, bind_group_id.backend());
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
match self.entries[index].provide(bind_group_id, bind_group, offsets) {
Provision::Unchanged => None,
@ -215,7 +218,7 @@ impl Binder {
}
}
pub(crate) fn invalid_mask(&self) -> BindGroupMask {
pub(super) fn invalid_mask(&self) -> BindGroupMask {
self.entries.iter().enumerate().fold(0, |mask, (i, entry)| {
if entry.is_valid() {
mask


@ -38,16 +38,18 @@
#![allow(clippy::reversed_empty_ranges)]
use crate::{
binding_model::PushConstantUploadError,
command::{BasePass, RenderCommand},
command::{BasePass, DrawError, RenderCommand, RenderCommandError},
conv,
device::{AttachmentData, Label, RenderPassContext, MAX_VERTEX_BUFFERS, SHADER_STAGE_COUNT},
device::{
AttachmentData, DeviceError, Label, RenderPassContext, MAX_VERTEX_BUFFERS,
SHADER_STAGE_COUNT,
},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Storage, Token},
id,
resource::BufferUse,
span,
track::TrackerSet,
validation::{check_buffer_usage, MissingBufferUsageError, MissingTextureUsageError},
validation::check_buffer_usage,
LifeGuard, RefCount, Stored, MAX_BIND_GROUPS,
};
use arrayvec::ArrayVec;
@ -124,9 +126,12 @@ impl RenderBundle {
/// This is partially duplicating the logic of `command_encoder_run_render_pass`.
/// However the point of this function is to be lighter, since we already had
/// a chance to go through the commands in `render_bundle_encoder_finish`.
///
/// Note that the function isn't expected to fail.
/// All the validation has already been done by this point.
pub(crate) unsafe fn execute<B: GfxBackend>(
&self,
cmdbuf: &mut B::CommandBuffer,
cmd_buf: &mut B::CommandBuffer,
pipeline_layout_guard: &Storage<
crate::binding_model::PipelineLayout<B>,
id::PipelineLayoutId,
@ -134,12 +139,12 @@ impl RenderBundle {
bind_group_guard: &Storage<crate::binding_model::BindGroup<B>, id::BindGroupId>,
pipeline_guard: &Storage<crate::pipeline::RenderPipeline<B>, id::RenderPipelineId>,
buffer_guard: &Storage<crate::resource::Buffer<B>, id::BufferId>,
) -> Result<(), RenderCommandError> {
) {
use hal::command::CommandBuffer as _;
let mut offsets = self.base.dynamic_offsets.as_slice();
let mut index_type = hal::IndexType::U16;
let mut pipeline_layout_id = None::<id::PipelineLayoutId>;
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
for command in self.base.commands.iter() {
match *command {
@ -148,8 +153,8 @@ impl RenderBundle {
num_dynamic_offsets,
bind_group_id,
} => {
let bind_group = &bind_group_guard[bind_group_id];
cmdbuf.bind_graphics_descriptor_sets(
let bind_group = bind_group_guard.get(bind_group_id).unwrap();
cmd_buf.bind_graphics_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id.unwrap()].raw,
index as usize,
iter::once(bind_group.raw.raw()),
@ -158,8 +163,8 @@ impl RenderBundle {
offsets = &offsets[num_dynamic_offsets as usize..];
}
RenderCommand::SetPipeline(pipeline_id) => {
let pipeline = &pipeline_guard[pipeline_id];
cmdbuf.bind_graphics_pipeline(&pipeline.raw);
let pipeline = pipeline_guard.get(pipeline_id).unwrap();
cmd_buf.bind_graphics_pipeline(&pipeline.raw);
index_type = conv::map_index_format(pipeline.index_format);
pipeline_layout_id = Some(pipeline.layout_id.value);
}
@ -168,7 +173,7 @@ impl RenderBundle {
offset,
size,
} => {
let buffer = &buffer_guard[buffer_id];
let buffer = buffer_guard.get(buffer_id).unwrap();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
range: hal::buffer::SubRange {
@ -178,7 +183,7 @@ impl RenderBundle {
index_type,
};
cmdbuf.bind_index_buffer(view);
cmd_buf.bind_index_buffer(view);
}
RenderCommand::SetVertexBuffer {
slot,
@ -186,12 +191,12 @@ impl RenderBundle {
offset,
size,
} => {
let buffer = &buffer_guard[buffer_id];
let buffer = buffer_guard.get(buffer_id).unwrap();
let range = hal::buffer::SubRange {
offset,
size: size.map(|s| s.get()),
};
cmdbuf.bind_vertex_buffers(slot, iter::once((&buffer.raw, range)));
cmd_buf.bind_vertex_buffers(slot, iter::once((&buffer.raw, range)));
}
RenderCommand::SetPushConstant {
stages,
@ -199,8 +204,7 @@ impl RenderBundle {
size_bytes,
values_offset,
} => {
let pipeline_layout_id =
pipeline_layout_id.ok_or(RenderCommandError::UnboundPipeline)?;
let pipeline_layout_id = pipeline_layout_id.unwrap();
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
if let Some(values_offset) = values_offset {
@ -208,7 +212,7 @@ impl RenderBundle {
let data_slice = &self.base.push_constant_data
[(values_offset as usize)..values_end_offset];
cmdbuf.push_graphics_constants(
cmd_buf.push_graphics_constants(
&pipeline_layout.raw,
conv::map_shader_stage_flags(stages),
offset,
@ -219,7 +223,7 @@ impl RenderBundle {
offset,
size_bytes,
|clear_offset, clear_data| {
cmdbuf.push_graphics_constants(
cmd_buf.push_graphics_constants(
&pipeline_layout.raw,
conv::map_shader_stage_flags(stages),
clear_offset,
@ -235,7 +239,7 @@ impl RenderBundle {
first_vertex,
first_instance,
} => {
cmdbuf.draw(
cmd_buf.draw(
first_vertex..first_vertex + vertex_count,
first_instance..first_instance + instance_count,
);
@ -247,7 +251,7 @@ impl RenderBundle {
base_vertex,
first_instance,
} => {
cmdbuf.draw_indexed(
cmd_buf.draw_indexed(
first_index..first_index + index_count,
base_vertex,
first_instance..first_instance + instance_count,
@ -259,8 +263,8 @@ impl RenderBundle {
count: None,
indexed: false,
} => {
let buffer = &buffer_guard[buffer_id];
cmdbuf.draw_indirect(&buffer.raw, offset, 1, 0);
let buffer = buffer_guard.get(buffer_id).unwrap();
cmd_buf.draw_indirect(&buffer.raw, offset, 1, 0);
}
RenderCommand::MultiDrawIndirect {
buffer_id,
@ -268,8 +272,8 @@ impl RenderBundle {
count: None,
indexed: true,
} => {
let buffer = &buffer_guard[buffer_id];
cmdbuf.draw_indexed_indirect(&buffer.raw, offset, 1, 0);
let buffer = buffer_guard.get(buffer_id).unwrap();
cmd_buf.draw_indexed_indirect(&buffer.raw, offset, 1, 0);
}
RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
@ -283,7 +287,6 @@ impl RenderBundle {
| RenderCommand::SetScissor(_) => unreachable!(),
}
}
Ok(())
}
}
@ -488,12 +491,12 @@ impl State {
&mut self,
slot: u8,
bind_group_id: id::BindGroupId,
layout_id: id::BindGroupLayoutId,
layout_id: id::Valid<id::BindGroupLayoutId>,
offsets: &[wgt::DynamicOffset],
) {
if self.bind[slot as usize].set_group(
bind_group_id,
layout_id,
layout_id.0,
self.raw_dynamic_offsets.len(),
offsets.len(),
) {
@ -506,7 +509,7 @@ impl State {
&mut self,
index_format: wgt::IndexFormat,
vertex_strides: &[(wgt::BufferAddress, wgt::InputStepMode)],
layout_ids: &[id::BindGroupLayoutId],
layout_ids: &[id::Valid<id::BindGroupLayoutId>],
push_constant_layouts: &[wgt::PushConstantRange],
) {
self.index.set_format(index_format);
@ -529,8 +532,8 @@ impl State {
self.bind
.iter()
.zip(layout_ids)
.position(|(bs, &layout_id)| match bs.bind_group {
Some((_, bgl_id)) => bgl_id != layout_id,
.position(|(bs, layout_id)| match bs.bind_group {
Some((_, bgl_id)) => bgl_id != layout_id.0,
None => false,
})
};
@ -595,36 +598,15 @@ impl State {
}
}
/// Error encountered when encoding a render command.
/// Error encountered when finishing recording a render bundle.
#[derive(Clone, Debug, Error)]
pub enum RenderCommandError {
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("dynamic buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(u64),
#[error("number of buffer offsets ({actual}) does not match the number of dynamic bindings ({expected})")]
InvalidDynamicOffsetCount { actual: usize, expected: usize },
#[error("render pipeline output formats and sample counts do not match render pass attachment formats")]
IncompatiblePipeline,
#[error("pipeline is not compatible with the depth-stencil read-only render pass")]
IncompatibleReadOnlyDepthStencil,
pub enum RenderBundleError {
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
Device(#[from] DeviceError),
#[error(transparent)]
MissingTextureUsage(#[from] MissingTextureUsageError),
#[error("a render pipeline must be bound")]
UnboundPipeline,
RenderCommand(#[from] RenderCommandError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
#[error("vertex {last_vertex} extends beyond limit {vertex_limit}")]
VertexBeyondLimit { last_vertex: u32, vertex_limit: u32 },
#[error("instance {last_instance} extends beyond limit {instance_limit}")]
InstanceBeyondLimit {
last_instance: u32,
instance_limit: u32,
},
#[error("index {last_index} extends beyond limit {index_limit}")]
IndexBeyondLimit { last_index: u32, index_limit: u32 },
Draw(#[from] DrawError),
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
@ -633,13 +615,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
bundle_encoder: RenderBundleEncoder,
desc: &wgt::RenderBundleDescriptor<Label>,
id_in: Input<G, id::RenderBundleId>,
) -> Result<id::RenderBundleId, RenderCommandError> {
) -> Result<id::RenderBundleId, RenderBundleError> {
span!(_guard, INFO, "RenderBundleEncoder::finish");
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[bundle_encoder.parent_id];
let device = device_guard
.get(bundle_encoder.parent_id)
.map_err(|_| DeviceError::Invalid)?;
let render_bundle = {
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
@ -660,7 +644,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
let mut commands = Vec::new();
let mut base = bundle_encoder.base.as_ref();
let mut pipeline_layout_id = None::<id::PipelineLayoutId>;
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
for &command in base.commands {
match command {
@ -671,10 +655,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => {
let max_bind_groups = device.limits.max_bind_groups;
if (index as u32) >= max_bind_groups {
return Err(RenderCommandError::BindGroupIndexOutOfRange {
Err(RenderCommandError::BindGroupIndexOutOfRange {
index,
max: max_bind_groups,
});
})?
}
let offsets = &base.dynamic_offsets[..num_dynamic_offsets as usize];
@ -686,19 +670,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map(|offset| *offset as wgt::BufferAddress)
.find(|offset| offset % wgt::BIND_BUFFER_ALIGNMENT != 0)
{
return Err(RenderCommandError::UnalignedBufferOffset(offset));
Err(RenderCommandError::UnalignedBufferOffset(offset))?
}
let bind_group = state
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
.map_err(|_| RenderCommandError::InvalidBindGroup(bind_group_id))?;
if bind_group.dynamic_binding_info.len() != offsets.len() {
return Err(RenderCommandError::InvalidDynamicOffsetCount {
Err(RenderCommandError::InvalidDynamicOffsetCount {
actual: offsets.len(),
expected: bind_group.dynamic_binding_info.len(),
});
})?
}
state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets);
@ -712,7 +696,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.unwrap();
if !bundle_encoder.context.compatible(&pipeline.pass_context) {
return Err(RenderCommandError::IncompatiblePipeline);
Err(RenderCommandError::IncompatiblePipeline)?
}
//TODO: check read-only depth
@ -740,7 +724,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDEX)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDEX)?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDEX)
.map_err(RenderCommandError::from)?;
let end = match size {
Some(s) => offset + s.get(),
@ -759,7 +744,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::VERTEX)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::VERTEX)?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::VERTEX)
.map_err(RenderCommandError::from)?;
let end = match size {
Some(s) => offset + s.get(),
@ -776,11 +762,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let end_offset = offset + size_bytes;
let pipeline_layout_id =
pipeline_layout_id.ok_or(RenderCommandError::UnboundPipeline)?;
pipeline_layout_id.ok_or(DrawError::MissingPipeline)?;
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
pipeline_layout
.validate_push_constant_ranges(stages, offset, end_offset)?;
.validate_push_constant_ranges(stages, offset, end_offset)
.map_err(RenderCommandError::from)?;
commands.push(command);
}
@ -793,17 +780,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (vertex_limit, instance_limit) = state.vertex_limits();
let last_vertex = first_vertex + vertex_count;
if last_vertex > vertex_limit {
return Err(RenderCommandError::VertexBeyondLimit {
Err(DrawError::VertexBeyondLimit {
last_vertex,
vertex_limit,
});
})?
}
let last_instance = first_instance + instance_count;
if last_instance > instance_limit {
return Err(RenderCommandError::InstanceBeyondLimit {
Err(DrawError::InstanceBeyondLimit {
last_instance,
instance_limit,
});
})?
}
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds());
@ -821,17 +808,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let index_limit = state.index.limit();
let last_index = first_index + index_count;
if last_index > index_limit {
return Err(RenderCommandError::IndexBeyondLimit {
Err(DrawError::IndexBeyondLimit {
last_index,
index_limit,
});
})?
}
let last_instance = first_instance + instance_count;
if last_instance > instance_limit {
return Err(RenderCommandError::InstanceBeyondLimit {
Err(DrawError::InstanceBeyondLimit {
last_instance,
instance_limit,
});
})?
}
commands.extend(state.index.flush());
commands.extend(state.flush_vertices());
@ -849,7 +836,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
.map_err(RenderCommandError::from)?;
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds());
@ -865,8 +853,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.unwrap();
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)?;
.map_err(|err| RenderCommandError::Buffer(buffer_id, err))?;
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
.map_err(RenderCommandError::from)?;
commands.extend(state.index.flush());
commands.extend(state.flush_vertices());
@ -899,7 +888,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
push_constant_data: Vec::new(),
},
device_id: Stored {
value: bundle_encoder.parent_id,
value: id::Valid(bundle_encoder.parent_id),
ref_count: device.life_guard.add_ref(),
},
used: state.trackers,
@ -920,7 +909,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (bundle_guard, _) = hub.render_bundles.read(&mut token);
let bundle = &bundle_guard[id];
trace.lock().add(trace::Action::CreateRenderBundle {
id,
id: id.0,
desc: trace::new_render_bundle_encoder_descriptor(desc.label, &bundle.context),
base: BasePass::from_ref(bundle.base.as_ref()),
});
@ -934,7 +923,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.bundles
.init(id, ref_count, PhantomData)
.unwrap();
Ok(id)
Ok(id.0)
}
}


@ -6,7 +6,7 @@ use crate::{
binding_model::{BindError, PushConstantUploadError},
command::{
bind::{Binder, LayoutChange},
BasePass, BasePassRef, CommandBuffer,
BasePass, BasePassRef, CommandBuffer, CommandEncoderError, UsageConflict,
},
device::all_buffer_stages,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
@ -18,7 +18,7 @@ use crate::{
use hal::command::CommandBuffer as _;
use thiserror::Error;
use wgt::{BufferAddress, BufferUsage};
use wgt::{BufferAddress, BufferUsage, ShaderStage};
use std::{fmt, iter, str};
@ -97,6 +97,44 @@ pub struct ComputePassDescriptor {
pub todo: u32,
}
#[derive(Clone, Debug, Error, PartialEq)]
pub enum DispatchError {
#[error("compute pipeline must be set")]
MissingPipeline,
#[error("current compute pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
}
#[derive(Clone, Debug, Error)]
pub enum ComputePassError {
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error("bind group {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId),
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("compute pipeline {0:?} is invalid")]
InvalidPipeline(id::ComputePipelineId),
#[error("indirect buffer {0:?} is invalid")]
InvalidIndirectBuffer(id::BufferId),
#[error(transparent)]
ResourceUsageConflict(UsageConflict),
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("cannot pop debug group, because number of pushed debug groups is zero")]
InvalidPopDebugGroup,
#[error(transparent)]
Dispatch(#[from] DispatchError),
#[error(transparent)]
Bind(#[from] BindError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
}
#[derive(Debug, PartialEq)]
enum PipelineState {
Required,
@ -110,20 +148,21 @@ struct State {
debug_scope_depth: u32,
}
#[derive(Clone, Debug, Error)]
pub enum ComputePassError {
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("a compute pipeline must be bound")]
UnboundPipeline,
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("cannot pop debug group, because number of pushed debug groups is zero")]
InvalidPopDebugGroup,
#[error(transparent)]
Bind(#[from] BindError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
impl State {
fn is_ready(&self) -> Result<(), DispatchError> {
//TODO: vertex buffers
let bind_mask = self.binder.invalid_mask();
if bind_mask != 0 {
//let (expected, provided) = self.binder.entries[index as usize].info();
return Err(DispatchError::IncompatibleBindGroup {
index: bind_mask.trailing_zeros(),
});
}
if self.pipeline == PipelineState::Required {
return Err(DispatchError::MissingPipeline);
}
Ok(())
}
}
// Common routines between render/compute
@ -147,12 +186,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let raw = cmb.raw.last_mut().unwrap();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
let raw = cmd_buf.raw.last_mut().unwrap();
#[cfg(feature = "trace")]
match cmb.commands {
match cmd_buf.commands {
Some(ref mut list) => {
list.push(crate::device::trace::Command::RunComputePass {
base: BasePass::from_ref(base),
@ -169,7 +208,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (texture_guard, _) = hub.textures.read(&mut token);
let mut state = State {
binder: Binder::new(cmb.limits.max_bind_groups),
binder: Binder::new(cmd_buf.limits.max_bind_groups),
pipeline: PipelineState::Required,
debug_scope_depth: 0,
};
@ -181,7 +220,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
num_dynamic_offsets,
bind_group_id,
} => {
let max_bind_groups = cmb.limits.max_bind_groups;
let max_bind_groups = cmd_buf.limits.max_bind_groups;
if (index as u32) >= max_bind_groups {
return Err(ComputePassError::BindGroupIndexOutOfRange {
index,
@ -192,11 +231,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let offsets = &base.dynamic_offsets[..num_dynamic_offsets as usize];
base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..];
let bind_group = cmb
let bind_group = cmd_buf
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
.map_err(|_| ComputePassError::InvalidBindGroup(bind_group_id))?;
bind_group.validate_dynamic_bindings(offsets)?;
tracing::trace!(
@ -206,7 +245,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
CommandBuffer::insert_barriers(
raw,
&mut cmb.trackers,
&mut cmd_buf.trackers,
&bind_group.used,
&*buffer_guard,
&*texture_guard,
@ -214,7 +253,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry(
index as usize,
bind_group_id,
id::Valid(bind_group_id),
bind_group,
offsets,
) {
@ -238,11 +277,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
ComputeCommand::SetPipeline(pipeline_id) => {
state.pipeline = PipelineState::Set;
let pipeline = cmb
let pipeline = cmd_buf
.trackers
.compute_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ())
.unwrap();
.map_err(|_| ComputePassError::InvalidPipeline(pipeline_id))?;
unsafe {
raw.bind_compute_pipeline(&pipeline.raw);
@ -319,12 +358,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let pipeline_layout_id = state
.binder
.pipeline_layout_id
.ok_or(ComputePassError::UnboundPipeline)?;
//TODO: don't error here, lazily update the push constants
.ok_or(ComputePassError::Dispatch(DispatchError::MissingPipeline))?;
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
pipeline_layout
.validate_push_constant_ranges(
wgt::ShaderStage::COMPUTE,
ShaderStage::COMPUTE,
offset,
end_offset_bytes,
)
@ -333,23 +373,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
unsafe { raw.push_compute_constants(&pipeline_layout.raw, offset, data_slice) }
}
ComputeCommand::Dispatch(groups) => {
if state.pipeline != PipelineState::Set {
return Err(ComputePassError::UnboundPipeline);
}
state.is_ready()?;
unsafe {
raw.dispatch(groups);
}
}
ComputeCommand::DispatchIndirect { buffer_id, offset } => {
if state.pipeline != PipelineState::Set {
return Err(ComputePassError::UnboundPipeline);
}
let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
buffer_id,
(),
BufferUse::INDIRECT,
);
state.is_ready()?;
let (src_buffer, src_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.map_err(ComputePassError::InvalidIndirectBuffer)?;
check_buffer_usage(src_buffer.usage, BufferUsage::INDIRECT)?;
let barriers = src_pending.map(|pending| pending.into_hal(src_buffer));


@ -0,0 +1,71 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Draw structures - shared between render passes and bundles.
!*/
use crate::{
binding_model::PushConstantUploadError,
id,
resource::BufferUse,
track::UseExtendError,
validation::{MissingBufferUsageError, MissingTextureUsageError},
};
pub type BufferError = UseExtendError<BufferUse>;
use thiserror::Error;
/// Error validating a draw call.
#[derive(Clone, Debug, Error, PartialEq)]
pub enum DrawError {
#[error("blend color needs to be set")]
MissingBlendColor,
#[error("stencil reference needs to be set")]
MissingStencilReference,
#[error("render pipeline must be set")]
MissingPipeline,
#[error("current render pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
#[error("vertex {last_vertex} extends beyond limit {vertex_limit}")]
VertexBeyondLimit { last_vertex: u32, vertex_limit: u32 },
#[error("instance {last_instance} extends beyond limit {instance_limit}")]
InstanceBeyondLimit {
last_instance: u32,
instance_limit: u32,
},
#[error("index {last_index} extends beyond limit {index_limit}")]
IndexBeyondLimit { last_index: u32, index_limit: u32 },
}
/// Error encountered when encoding a render command.
/// This is the shared error set between render bundles and passes.
#[derive(Clone, Debug, Error)]
pub enum RenderCommandError {
#[error("bind group {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId),
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("dynamic buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(u64),
#[error("number of buffer offsets ({actual}) does not match the number of dynamic bindings ({expected})")]
InvalidDynamicOffsetCount { actual: usize, expected: usize },
#[error("render pipeline {0:?} is invalid")]
InvalidPipeline(id::RenderPipelineId),
#[error("render pipeline output formats and sample counts do not match render pass attachment formats")]
IncompatiblePipeline,
#[error("pipeline is not compatible with the depth-stencil read-only render pass")]
IncompatibleReadOnlyDepthStencil,
#[error("buffer {0:?} is in error {1:?}")]
Buffer(id::BufferId, BufferError),
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error(transparent)]
MissingTextureUsage(#[from] MissingTextureUsageError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
}


@ -6,6 +6,7 @@ mod allocator;
mod bind;
mod bundle;
mod compute;
mod draw;
mod render;
mod transfer;
@ -13,6 +14,7 @@ pub(crate) use self::allocator::CommandAllocator;
pub use self::allocator::CommandAllocatorError;
pub use self::bundle::*;
pub use self::compute::*;
pub use self::draw::*;
pub use self::render::*;
pub use self::transfer::*;
@ -48,6 +50,17 @@ pub struct CommandBuffer<B: hal::Backend> {
}
impl<B: GfxBackend> CommandBuffer<B> {
fn get_encoder(
storage: &mut Storage<Self, id::CommandEncoderId>,
id: id::CommandEncoderId,
) -> Result<&mut Self, CommandEncoderError> {
match storage.get_mut(id) {
Ok(cmd_buf) if cmd_buf.is_recording => Ok(cmd_buf),
Ok(_) => Err(CommandEncoderError::NotRecording),
Err(_) => Err(CommandEncoderError::Invalid),
}
}
pub(crate) fn insert_barriers(
raw: &mut B::CommandBuffer,
base: &mut TrackerSet,
@ -144,8 +157,10 @@ impl<C: Clone> BasePass<C> {
}
#[derive(Clone, Debug, Error)]
pub enum CommandEncoderFinishError {
#[error("command buffer must be recording")]
pub enum CommandEncoderError {
#[error("command encoder is invalid")]
Invalid,
#[error("command encoder must be active")]
NotRecording,
}
@ -154,28 +169,25 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
encoder_id: id::CommandEncoderId,
_desc: &wgt::CommandBufferDescriptor,
) -> Result<id::CommandBufferId, CommandEncoderFinishError> {
) -> Result<id::CommandBufferId, CommandEncoderError> {
span!(_guard, INFO, "CommandEncoder::finish");
let hub = B::hub(self);
let mut token = Token::root();
let (swap_chain_guard, mut token) = hub.swap_chains.read(&mut token);
//TODO: actually close the last recorded command buffer
let (mut comb_guard, _) = hub.command_buffers.write(&mut token);
let cmdbuf = &mut comb_guard[encoder_id];
if !cmdbuf.is_recording {
return Err(CommandEncoderFinishError::NotRecording);
}
cmdbuf.is_recording = false;
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
cmd_buf.is_recording = false;
// stop tracking the swapchain image, if used
if let Some((ref sc_id, _)) = cmdbuf.used_swap_chain {
if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
let view_id = swap_chain_guard[sc_id.value]
.acquired_view_id
.as_ref()
.expect("Used swap chain frame has already presented");
cmdbuf.trackers.views.remove(view_id.value);
cmd_buf.trackers.views.remove(view_id.value);
}
tracing::trace!("Command buffer {:?} {:#?}", encoder_id, cmdbuf.trackers);
tracing::trace!("Command buffer {:?} {:#?}", encoder_id, cmd_buf.trackers);
Ok(encoder_id)
}
@ -183,56 +195,70 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&self,
encoder_id: id::CommandEncoderId,
label: &str,
) {
) -> Result<(), CommandEncoderError> {
span!(_guard, DEBUG, "CommandEncoder::push_debug_group");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, _) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let cmb_raw = cmb.raw.last_mut().unwrap();
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.begin_debug_marker(label, 0);
}
Ok(())
}
pub fn command_encoder_insert_debug_marker<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
label: &str,
) {
) -> Result<(), CommandEncoderError> {
span!(_guard, DEBUG, "CommandEncoder::insert_debug_marker");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, _) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let cmb_raw = cmb.raw.last_mut().unwrap();
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.insert_debug_marker(label, 0);
}
Ok(())
}
pub fn command_encoder_pop_debug_group<B: GfxBackend>(&self, encoder_id: id::CommandEncoderId) {
pub fn command_encoder_pop_debug_group<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
) -> Result<(), CommandEncoderError> {
span!(_guard, DEBUG, "CommandEncoder::pop_debug_marker");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, _) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let cmb_raw = cmb.raw.last_mut().unwrap();
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.end_debug_marker();
}
Ok(())
}
}
#[derive(Clone, Debug, Error)]
pub enum UsageConflict {
#[error("buffer {0:?} combined usage is {1:?}")]
Buffer(id::BufferId, wgt::BufferUsage),
#[error("texture {0:?} combined usage is {1:?}")]
Texture(id::TextureId, wgt::TextureUsage),
}
fn push_constant_clear<PushFn>(offset: u32, size_bytes: u32, mut push_fn: PushFn)
where
PushFn: FnMut(u32, &[u32]),


@ -6,7 +6,7 @@ use crate::{
binding_model::BindError,
command::{
bind::{Binder, LayoutChange},
BasePass, BasePassRef, RenderCommandError,
BasePass, BasePassRef, CommandBuffer, CommandEncoderError, DrawError, RenderCommandError,
},
conv,
device::{
@ -16,7 +16,7 @@ use crate::{
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
pipeline::PipelineFlags,
resource::{BufferUse, TextureUse, TextureViewInner},
resource::{BufferUse, TextureUse, TextureView, TextureViewInner},
span,
track::TrackerSet,
validation::{
@ -274,22 +274,6 @@ impl OptionalState {
}
}
#[derive(Clone, Debug, Error, PartialEq)]
pub enum DrawError {
#[error("blend color needs to be set")]
MissingBlendColor,
#[error("stencil reference needs to be set")]
MissingStencilReference,
#[error("render pipeline must be set")]
MissingPipeline,
#[error("current render pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
}
#[derive(Debug, Default)]
struct IndexState {
bound_buffer_view: Option<(id::BufferId, Range<BufferAddress>)>,
@ -407,6 +391,10 @@ impl State {
/// Error encountered when performing a render pass.
#[derive(Clone, Debug, Error)]
pub enum RenderPassError {
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error("attachment texture view {0:?} is invalid")]
InvalidAttachment(id::TextureViewId),
#[error("attachment's sample count {0} is invalid")]
InvalidSampleCount(u8),
#[error("attachment with resolve target must be multi-sampled")]
@ -516,12 +504,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let mut trackers = TrackerSet::new(B::VARIANT);
let cmb = &mut cmb_guard[encoder_id];
let device = &device_guard[cmb.device_id.value];
let mut raw = device.cmd_allocator.extend(cmb);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmb_guard, encoder_id)?;
let device = &device_guard[cmd_buf.device_id.value];
let mut raw = device.cmd_allocator.extend(cmd_buf);
#[cfg(feature = "trace")]
match cmb.commands {
match cmd_buf.commands {
Some(ref mut list) => {
list.push(crate::device::trace::Command::RunRenderPass {
base: BasePass::from_ref(base),
@ -562,18 +550,34 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
use hal::device::Device as _;
let sample_count_limit = device.hal_limits.framebuffer_color_sample_counts;
let base_trackers = &cmb.trackers;
let base_trackers = &cmd_buf.trackers;
let mut extent = None;
let mut sample_count = 0;
let mut depth_stencil_aspects = hal::format::Aspects::empty();
let mut used_swap_chain = None::<Stored<id::SwapChainId>>;
let sample_count = color_attachments
.get(0)
.map(|at| view_guard[at.attachment].samples)
.unwrap_or(1);
if sample_count & sample_count_limit == 0 {
return Err(RenderPassError::InvalidSampleCount(sample_count));
}
let mut add_view = |view: &TextureView<B>| {
if let Some(ex) = extent {
if ex != view.extent {
return Err(RenderPassError::ExtentStateMismatch {
state_extent: ex,
view_extent: view.extent,
});
}
} else {
extent = Some(view.extent);
}
if sample_count == 0 {
sample_count = view.samples;
} else if sample_count != view.samples {
return Err(RenderPassError::SampleCountMismatch {
actual: view.samples,
expected: sample_count,
});
}
Ok(())
};
tracing::trace!(
"Encoding render pass begin in command buffer {:?}",
@ -585,17 +589,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let view = trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
if ex != view.extent {
return Err(RenderPassError::ExtentStateMismatch {
state_extent: ex,
view_extent: view.extent,
});
}
} else {
extent = Some(view.extent);
}
.map_err(|_| RenderPassError::InvalidAttachment(at.attachment))?;
add_view(view)?;
depth_stencil_aspects = view.range.aspects;
let source_id = match view.inner {
TextureViewInner::Native { ref source_id, .. } => source_id,
TextureViewInner::SwapChain { .. } => {
@ -648,23 +645,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let view = trackers
.views
.use_extend(&*view_guard, at.attachment, (), ())
.unwrap();
if let Some(ex) = extent {
if ex != view.extent {
return Err(RenderPassError::ExtentStateMismatch {
state_extent: ex,
view_extent: view.extent,
});
}
} else {
extent = Some(view.extent);
}
if view.samples != sample_count {
return Err(RenderPassError::SampleCountMismatch {
actual: view.samples,
expected: sample_count,
});
}
.map_err(|_| RenderPassError::InvalidAttachment(at.attachment))?;
add_view(view)?;
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
@ -690,7 +672,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
old_layout..new_layout
}
TextureViewInner::SwapChain { ref source_id, .. } => {
if let Some((ref sc_id, _)) = cmb.used_swap_chain {
if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
if source_id.value != sc_id.value {
return Err(RenderPassError::SwapChainMismatch);
}
@ -725,7 +707,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let view = trackers
.views
.use_extend(&*view_guard, resolve_target, (), ())
.unwrap();
.map_err(|_| RenderPassError::InvalidAttachment(resolve_target))?;
if extent != Some(view.extent) {
return Err(RenderPassError::ExtentStateMismatch {
state_extent: extent.unwrap_or_default(),
@ -735,6 +717,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if view.samples != 1 {
return Err(RenderPassError::InvalidResolveTargetSampleCount);
}
if sample_count == 1 {
return Err(RenderPassError::InvalidResolveSourceSampleCount);
}
let layouts = match view.inner {
TextureViewInner::Native { ref source_id, .. } => {
@ -760,7 +745,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
old_layout..new_layout
}
TextureViewInner::SwapChain { ref source_id, .. } => {
if let Some((ref sc_id, _)) = cmb.used_swap_chain {
if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
if source_id.value != sc_id.value {
return Err(RenderPassError::SwapChainMismatch);
}
@ -795,6 +780,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
};
if sample_count & sample_count_limit == 0 {
return Err(RenderPassError::InvalidSampleCount(sample_count));
}
let mut render_pass_cache = device.render_passes.lock();
let render_pass = match render_pass_cache.entry(rp_key.clone()) {
Entry::Occupied(e) => e.into_mut(),
@ -818,20 +807,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.zip(entry.key().resolves.iter())
{
let real_attachment_index = match at.resolve_target {
Some(resolve_attachment) => {
let attachment_sample_count = view_guard[at.attachment].samples;
if attachment_sample_count == 1 {
return Err(
RenderPassError::InvalidResolveSourceSampleCount,
);
}
if view_guard[resolve_attachment].samples != 1 {
return Err(
RenderPassError::InvalidResolveTargetSampleCount,
);
}
attachment_index + i
}
Some(_) => attachment_index + i,
None => hal::pass::ATTACHMENT_UNUSED,
};
resolve_ids.push((real_attachment_index, layout));
@ -839,14 +815,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
attachment_index += color_attachments.len();
}
let depth_id = depth_stencil_attachment.map(|at| {
let aspects = view_guard[at.attachment].range.aspects;
let depth_id = depth_stencil_attachment.map(|_| {
let usage = if is_ds_read_only {
TextureUse::ATTACHMENT_READ
} else {
TextureUse::ATTACHMENT_WRITE
};
(attachment_index, conv::map_texture_state(usage, aspects).1)
(
attachment_index,
conv::map_texture_state(usage, depth_stencil_aspects).1,
)
});
let subpass = hal::pass::SubpassDesc {
@ -867,17 +845,37 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut framebuffer_cache;
let fb_key = FramebufferKey {
colors: color_attachments.iter().map(|at| at.attachment).collect(),
colors: color_attachments
.iter()
.map(|at| id::Valid(at.attachment))
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.map(id::Valid)
.collect(),
depth_stencil: depth_stencil_attachment.map(|at| at.attachment),
depth_stencil: depth_stencil_attachment.map(|at| id::Valid(at.attachment)),
};
let context = RenderPassContext {
attachments: AttachmentData {
colors: fb_key
.colors
.iter()
.map(|&at| view_guard[at].format)
.collect(),
resolves: fb_key
.resolves
.iter()
.map(|&at| view_guard[at].format)
.collect(),
depth_stencil: fb_key.depth_stencil.map(|at| view_guard[at].format),
},
sample_count,
};
let framebuffer = match used_swap_chain.take() {
Some(sc_id) => {
assert!(cmb.used_swap_chain.is_none());
assert!(cmd_buf.used_swap_chain.is_none());
// Always create a new framebuffer and delete it after presentation.
let attachments = fb_key.all().map(|&id| match view_guard[id].inner {
TextureViewInner::Native { ref raw, .. } => raw,
@ -889,8 +887,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.create_framebuffer(&render_pass, attachments, extent.unwrap())
.or(Err(RenderPassError::OutOfMemory))?
};
cmb.used_swap_chain = Some((sc_id, framebuffer));
&mut cmb.used_swap_chain.as_mut().unwrap().1
cmd_buf.used_swap_chain = Some((sc_id, framebuffer));
&mut cmd_buf.used_swap_chain.as_mut().unwrap().1
}
None => {
// Cache framebuffers by the device.
@ -996,26 +994,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
}
RenderPassContext {
attachments: AttachmentData {
colors: color_attachments
.iter()
.map(|at| view_guard[at.attachment].format)
.collect(),
resolves: color_attachments
.iter()
.filter_map(|at| at.resolve_target)
.map(|resolve| view_guard[resolve].format)
.collect(),
depth_stencil: depth_stencil_attachment
.map(|at| view_guard[at.attachment].format),
},
sample_count,
}
context
};
let mut state = State {
binder: Binder::new(cmb.limits.max_bind_groups),
binder: Binder::new(cmd_buf.limits.max_bind_groups),
blend_color: OptionalState::Unused,
stencil_reference: OptionalState::Unused,
pipeline: OptionalState::Required,
@ -1055,7 +1038,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry(
index as usize,
bind_group_id,
id::Valid(bind_group_id),
bind_group,
offsets,
) {
@ -1314,7 +1297,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let pipeline_layout_id = state
.binder
.pipeline_layout_id
.ok_or(RenderCommandError::UnboundPipeline)?;
.ok_or(DrawError::MissingPipeline)?;
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
pipeline_layout
@ -1352,20 +1335,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let last_vertex = first_vertex + vertex_count;
let vertex_limit = state.vertex.vertex_limit;
if last_vertex > vertex_limit {
return Err(RenderCommandError::VertexBeyondLimit {
Err(DrawError::VertexBeyondLimit {
last_vertex,
vertex_limit,
}
.into());
})?
}
let last_instance = first_instance + instance_count;
let instance_limit = state.vertex.instance_limit;
if last_instance > instance_limit {
return Err(RenderCommandError::InstanceBeyondLimit {
Err(DrawError::InstanceBeyondLimit {
last_instance,
instance_limit,
}
.into());
})?
}
unsafe {
@ -1388,20 +1369,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let last_index = first_index + index_count;
let index_limit = state.index.limit;
if last_index > index_limit {
return Err(RenderCommandError::IndexBeyondLimit {
Err(DrawError::IndexBeyondLimit {
last_index,
index_limit,
}
.into());
})?
}
let last_instance = first_instance + instance_count;
let instance_limit = state.vertex.instance_limit;
if last_instance > instance_limit {
return Err(RenderCommandError::InstanceBeyondLimit {
Err(DrawError::InstanceBeyondLimit {
last_instance,
instance_limit,
}
.into());
})?
}
unsafe {
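A pattern worth calling out in the hunks above: early returns such as `return Err(RenderCommandError::VertexBeyondLimit { .. }.into());` become `Err(DrawError::VertexBeyondLimit { .. })?`. Applying `?` to an `Err` value converts the error through the target type's `From` impl and returns early, so the explicit `.into()` and `return` disappear. A minimal sketch of the idiom, with illustrative names rather than the crate's real types:

```rust
use thiserror::Error;

#[derive(Debug, Error)]
#[error("vertex {last_vertex} is beyond the limit {vertex_limit}")]
struct DrawError {
    last_vertex: u32,
    vertex_limit: u32,
}

#[derive(Debug, Error)]
enum RenderPassError {
    #[error(transparent)]
    Draw(#[from] DrawError), // derives From<DrawError> for RenderPassError
}

fn draw(last_vertex: u32, vertex_limit: u32) -> Result<(), RenderPassError> {
    if last_vertex > vertex_limit {
        // `?` on an `Err` value converts DrawError into RenderPassError
        // via the generated From impl and returns early.
        Err(DrawError { last_vertex, vertex_limit })?
    }
    Ok(())
}

fn main() {
    assert!(draw(10, 5).is_err());
    assert!(draw(3, 5).is_ok());
}
```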
@ -1579,7 +1558,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
&*bind_group_guard,
&*pipeline_guard,
&*buffer_guard,
)?;
)
}
trackers.merge_extend(&bundle.used);
@ -1625,16 +1604,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
super::CommandBuffer::insert_barriers(
cmb.raw.last_mut().unwrap(),
&mut cmb.trackers,
cmd_buf.raw.last_mut().unwrap(),
&mut cmd_buf.trackers,
&trackers,
&*buffer_guard,
&*texture_guard,
);
unsafe {
cmb.raw.last_mut().unwrap().finish();
cmd_buf.raw.last_mut().unwrap().finish();
}
cmb.raw.push(raw);
cmd_buf.raw.push(raw);
Ok(())
}

View File

@ -5,6 +5,7 @@
#[cfg(feature = "trace")]
use crate::device::trace::Command as TraceCommand;
use crate::{
command::{CommandBuffer, CommandEncoderError},
conv,
device::{all_buffer_stages, all_image_stages},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
@ -15,31 +16,32 @@ use crate::{
use hal::command::CommandBuffer as _;
use thiserror::Error;
use wgt::{BufferAddress, BufferUsage, Extent3d, TextureDataLayout, TextureUsage};
use wgt::{BufferAddress, BufferUsage, Extent3d, TextureUsage};
use std::iter;
type Result = std::result::Result<(), TransferError>;
pub(crate) const BITS_PER_BYTE: u32 = 8;
pub type BufferCopyView = wgt::BufferCopyView<BufferId>;
pub type TextureCopyView = wgt::TextureCopyView<TextureId>;
/// Error encountered while attempting a data transfer.
#[derive(Copy, Clone, Debug, Error, Eq, PartialEq)]
#[derive(Clone, Debug, Error)]
pub enum TransferError {
#[error("buffer {0:?} is invalid")]
InvalidBuffer(BufferId),
#[error("texture {0:?} is invalid")]
InvalidTexture(TextureId),
#[error("source buffer/texture is missing the `COPY_SRC` usage flag")]
MissingCopySrcUsageFlag,
#[error("destination buffer/texture is missing the `COPY_DST` usage flag")]
MissingCopyDstUsageFlag,
#[error("copy would end up overruning the bounds of the destination buffer/texture")]
BufferOverrun,
#[error("buffer offset is not aligned to block size")]
UnalignedBufferOffset,
#[error("copy size does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedCopySize,
#[error("buffer offset {0} is not aligned to block size or `COPY_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(BufferAddress),
#[error("copy size {0} does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedCopySize(BufferAddress),
#[error("copy width is not a multiple of block width")]
UnalignedCopyWidth,
#[error("copy height is not a multiple of block height")]
@ -62,18 +64,32 @@ pub enum TransferError {
MismatchedAspects,
}
/// Error encountered while attempting to do a copy on a command encoder.
#[derive(Clone, Debug, Error)]
pub enum CopyError {
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error(transparent)]
Transfer(#[from] TransferError),
}
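The new `CopyError` is a thin aggregation layer: every copy entry point can now fail either while resolving the command encoder or while validating the transfer itself, and the `#[from]` conversions plus `#[error(transparent)]` let both inner errors flow through `?` without losing their messages. A self-contained sketch of the shape, with simplified stand-in types rather than the real wgpu-core definitions:

```rust
use thiserror::Error;

#[derive(Clone, Debug, Error)]
#[error("command encoder is invalid")]
struct CommandEncoderError;

#[derive(Clone, Debug, Error)]
#[error("source buffer is missing the COPY_SRC usage flag")]
struct TransferError;

#[derive(Clone, Debug, Error)]
enum CopyError {
    #[error(transparent)]
    Encoder(#[from] CommandEncoderError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
}

fn get_encoder(ok: bool) -> Result<(), CommandEncoderError> {
    if ok { Ok(()) } else { Err(CommandEncoderError) }
}

fn validate_copy(ok: bool) -> Result<(), TransferError> {
    if ok { Ok(()) } else { Err(TransferError) }
}

// The two `?`s target different inner error types; each converts into
// CopyError through its #[from] impl, and `transparent` forwards the
// inner Display message unchanged at the boundary.
fn copy_buffer_to_buffer(encoder_ok: bool, copy_ok: bool) -> Result<(), CopyError> {
    get_encoder(encoder_ok)?;
    validate_copy(copy_ok)?;
    Ok(())
}

fn main() {
    assert!(matches!(
        copy_buffer_to_buffer(false, true),
        Err(CopyError::Encoder(_))
    ));
    assert!(copy_buffer_to_buffer(true, true).is_ok());
}
```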
//TODO: we currently access each texture twice for a transfer,
// once only to get the aspect flags, which is unfortunate.
pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
view: &TextureCopyView,
size: &Extent3d,
texture_guard: &Storage<Texture<B>, TextureId>,
) -> (
hal::image::SubresourceLayers,
hal::image::SubresourceRange,
hal::image::Offset,
) {
let texture = &texture_guard[view.texture];
) -> Result<
(
hal::image::SubresourceLayers,
hal::image::SubresourceRange,
hal::image::Offset,
),
TransferError,
> {
let texture = texture_guard
.get(view.texture)
.map_err(|_| TransferError::InvalidTexture(view.texture))?;
let aspects = texture.full_range.aspects;
let level = view.mip_level as hal::image::Level;
let (layer, layer_count, z) = match texture.dimension {
@ -88,7 +104,7 @@ pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
// TODO: Can't satisfy clippy here unless we modify
// `hal::image::SubresourceRange` in gfx to use `std::ops::RangeBounds`.
#[allow(clippy::range_plus_one)]
(
Ok((
hal::image::SubresourceLayers {
aspects,
level: view.mip_level as hal::image::Level,
@ -104,17 +120,17 @@ pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
y: view.origin.y as i32,
z,
},
)
))
}
/// Function copied with minor modifications from the WebGPU standard: https://gpuweb.github.io/gpuweb/#valid-texture-copy-range
pub(crate) fn validate_linear_texture_data(
layout: &TextureDataLayout,
layout: &wgt::TextureDataLayout,
format: wgt::TextureFormat,
buffer_size: BufferAddress,
bytes_per_block: BufferAddress,
copy_size: &Extent3d,
) -> Result {
) -> Result<(), TransferError> {
// Convert all inputs to BufferAddress (u64) to prevent overflow issues
let copy_width = copy_size.width as BufferAddress;
let copy_height = copy_size.height as BufferAddress;
@ -162,7 +178,7 @@ pub(crate) fn validate_linear_texture_data(
return Err(TransferError::BufferOverrun);
}
if offset % block_size != 0 {
return Err(TransferError::UnalignedBufferOffset);
return Err(TransferError::UnalignedBufferOffset(offset));
}
if copy_height > 1 && bytes_per_row < bytes_in_a_complete_row {
return Err(TransferError::InvalidBytesPerRow);
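To make the new parameterized variants concrete, a small worked example (an illustration, not code from this commit): for RGBA8 a block is a single texel of 4 bytes, so a 100-texel-wide copy has `bytes_in_a_complete_row = 400`; the caller's `bytes_per_row` must be at least that, and the encoder copy paths later in this file additionally require it to be a multiple of `COPY_BYTES_PER_ROW_ALIGNMENT`, which is 256 bytes in wgpu:

```rust
/// Round a tightly-packed row size up to wgpu's row-pitch alignment.
fn padded_bytes_per_row(width_blocks: u64, bytes_per_block: u64) -> u64 {
    const ALIGN: u64 = 256; // wgt::COPY_BYTES_PER_ROW_ALIGNMENT
    let unpadded = width_blocks * bytes_per_block;
    (unpadded + ALIGN - 1) / ALIGN * ALIGN // round up to the next multiple
}

fn main() {
    // 100 RGBA8 texels per row: 400 unpadded bytes, padded up to 512.
    assert_eq!(padded_bytes_per_row(100, 4), 512);
}
```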
@ -179,7 +195,7 @@ pub(crate) fn validate_texture_copy_range(
texture_format: wgt::TextureFormat,
texture_dimension: hal::image::Kind,
copy_size: &Extent3d,
) -> Result {
) -> Result<(), TransferError> {
let (block_width, block_height) = conv::texture_block_size(texture_format);
let mut extent = texture_dimension.level_extent(texture_copy_view.mip_level as u8);
@ -232,21 +248,21 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
destination: BufferId,
destination_offset: BufferAddress,
size: BufferAddress,
) -> Result {
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_buffer_to_buffer");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
let (buffer_guard, _) = hub.buffers.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
#[cfg(feature = "trace")]
match cmb.commands {
match cmd_buf.commands {
Some(ref mut list) => list.push(TraceCommand::CopyBufferToBuffer {
src: source,
src_offset: source_offset,
@ -262,41 +278,43 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (src_buffer, src_pending) =
cmb.trackers
.buffers
.use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC);
let (src_buffer, src_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, source, (), BufferUse::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?;
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag);
Err(TransferError::MissingCopySrcUsageFlag)?
}
barriers.extend(src_pending.map(|pending| pending.into_hal(src_buffer)));
let (dst_buffer, dst_pending) =
cmb.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST);
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag);
Err(TransferError::MissingCopyDstUsageFlag)?
}
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_buffer)));
if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedCopySize);
Err(TransferError::UnalignedCopySize(size))?
}
if source_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedBufferOffset);
Err(TransferError::UnalignedBufferOffset(source_offset))?
}
if destination_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedBufferOffset);
Err(TransferError::UnalignedBufferOffset(destination_offset))?
}
let source_end_offset = source_offset + size;
let destination_end_offset = destination_offset + size;
if source_end_offset > src_buffer.size {
return Err(TransferError::BufferOverrun);
Err(TransferError::BufferOverrun)?
}
if destination_end_offset > dst_buffer.size {
return Err(TransferError::BufferOverrun);
Err(TransferError::BufferOverrun)?
}
let region = hal::command::BufferCopy {
@ -304,7 +322,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
dst: destination_offset,
size,
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
@ -322,20 +340,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
source: &BufferCopyView,
destination: &TextureCopyView,
copy_size: &Extent3d,
) -> Result {
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_buffer_to_texture");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let (dst_layers, dst_range, dst_offset) =
texture_copy_view_to_hal(destination, copy_size, &*texture_guard);
texture_copy_view_to_hal(destination, copy_size, &*texture_guard)?;
#[cfg(feature = "trace")]
match cmb.commands {
match cmd_buf.commands {
Some(ref mut list) => list.push(TraceCommand::CopyBufferToTexture {
src: source.clone(),
dst: destination.clone(),
@ -349,39 +367,42 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
source.buffer,
(),
BufferUse::COPY_SRC,
);
let (src_buffer, src_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, source.buffer, (), BufferUse::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?;
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag);
Err(TransferError::MissingCopySrcUsageFlag)?
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
dst_range,
TextureUse::COPY_DST,
);
let (dst_texture, dst_pending) = cmd_buf
.trackers
.textures
.use_replace(
&*texture_guard,
destination.texture,
dst_range,
TextureUse::COPY_DST,
)
.unwrap();
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag);
Err(TransferError::MissingCopyDstUsageFlag)?
}
let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_texture));
let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT;
let bytes_per_block = conv::map_texture_format(dst_texture.format, cmb.private_features)
let bytes_per_block = conv::map_texture_format(dst_texture.format, cmd_buf.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let src_bytes_per_row = source.layout.bytes_per_row;
if bytes_per_row_alignment % bytes_per_block != 0 {
return Err(TransferError::UnalignedBytesPerRow);
Err(TransferError::UnalignedBytesPerRow)?
}
if src_bytes_per_row % bytes_per_row_alignment != 0 {
return Err(TransferError::UnalignedBytesPerRow);
Err(TransferError::UnalignedBytesPerRow)?
}
validate_texture_copy_range(destination, dst_texture.format, dst_texture.kind, copy_size)?;
validate_linear_texture_data(
@ -403,7 +424,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
image_offset: dst_offset,
image_extent: conv::map_extent(copy_size, dst_texture.dimension),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER,
@ -426,20 +447,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
source: &TextureCopyView,
destination: &BufferCopyView,
copy_size: &Extent3d,
) -> Result {
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_texture_to_buffer");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let (src_layers, src_range, src_offset) =
texture_copy_view_to_hal(source, copy_size, &*texture_guard);
texture_copy_view_to_hal(source, copy_size, &*texture_guard)?;
#[cfg(feature = "trace")]
match cmb.commands {
match cmd_buf.commands {
Some(ref mut list) => list.push(TraceCommand::CopyTextureToBuffer {
src: source.clone(),
dst: destination.clone(),
@ -453,39 +474,42 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
src_range,
TextureUse::COPY_SRC,
);
let (src_texture, src_pending) = cmd_buf
.trackers
.textures
.use_replace(
&*texture_guard,
source.texture,
src_range,
TextureUse::COPY_SRC,
)
.unwrap();
if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag);
Err(TransferError::MissingCopySrcUsageFlag)?
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_texture));
let (dst_buffer, dst_barriers) = cmb.trackers.buffers.use_replace(
&*buffer_guard,
destination.buffer,
(),
BufferUse::COPY_DST,
);
let (dst_buffer, dst_barriers) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, destination.buffer, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
if !dst_buffer.usage.contains(BufferUsage::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag);
Err(TransferError::MissingCopyDstUsageFlag)?
}
let dst_barrier = dst_barriers.map(|pending| pending.into_hal(dst_buffer));
let bytes_per_row_alignment = wgt::COPY_BYTES_PER_ROW_ALIGNMENT;
let bytes_per_block = conv::map_texture_format(src_texture.format, cmb.private_features)
let bytes_per_block = conv::map_texture_format(src_texture.format, cmd_buf.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
let dst_bytes_per_row = destination.layout.bytes_per_row;
if bytes_per_row_alignment % bytes_per_block != 0 {
return Err(TransferError::UnalignedBytesPerRow);
Err(TransferError::UnalignedBytesPerRow)?
}
if dst_bytes_per_row % bytes_per_row_alignment != 0 {
return Err(TransferError::UnalignedBytesPerRow);
Err(TransferError::UnalignedBytesPerRow)?
}
validate_texture_copy_range(source, src_texture.format, src_texture.kind, copy_size)?;
validate_linear_texture_data(
@ -507,7 +531,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
image_offset: src_offset,
image_extent: conv::map_extent(copy_size, src_texture.dimension),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER,
@ -530,29 +554,29 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
source: &TextureCopyView,
destination: &TextureCopyView,
copy_size: &Extent3d,
) -> Result {
) -> Result<(), CopyError> {
span!(_guard, INFO, "CommandEncoder::copy_texture_to_texture");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?;
let (_, mut token) = hub.buffers.read(&mut token); // skip token
let (texture_guard, _) = hub.textures.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the texture tracker mutably...
let mut barriers = Vec::new();
let (src_layers, src_range, src_offset) =
texture_copy_view_to_hal(source, copy_size, &*texture_guard);
texture_copy_view_to_hal(source, copy_size, &*texture_guard)?;
let (dst_layers, dst_range, dst_offset) =
texture_copy_view_to_hal(destination, copy_size, &*texture_guard);
texture_copy_view_to_hal(destination, copy_size, &*texture_guard)?;
if src_layers.aspects != dst_layers.aspects {
return Err(TransferError::MismatchedAspects);
Err(TransferError::MismatchedAspects)?
}
#[cfg(feature = "trace")]
match cmb.commands {
match cmd_buf.commands {
Some(ref mut list) => list.push(TraceCommand::CopyTextureToTexture {
src: source.clone(),
dst: destination.clone(),
@ -566,25 +590,33 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
source.texture,
src_range,
TextureUse::COPY_SRC,
);
let (src_texture, src_pending) = cmd_buf
.trackers
.textures
.use_replace(
&*texture_guard,
source.texture,
src_range,
TextureUse::COPY_SRC,
)
.unwrap();
if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag);
Err(TransferError::MissingCopySrcUsageFlag)?
}
barriers.extend(src_pending.map(|pending| pending.into_hal(src_texture)));
let (dst_texture, dst_pending) = cmb.trackers.textures.use_replace(
&*texture_guard,
destination.texture,
dst_range,
TextureUse::COPY_DST,
);
let (dst_texture, dst_pending) = cmd_buf
.trackers
.textures
.use_replace(
&*texture_guard,
destination.texture,
dst_range,
TextureUse::COPY_DST,
)
.unwrap();
if !dst_texture.usage.contains(TextureUsage::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag);
Err(TransferError::MissingCopyDstUsageFlag)?
}
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_texture)));
@ -598,7 +630,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
dst_offset,
extent: conv::map_extent(copy_size, src_texture.dimension),
};
let cmb_raw = cmb.raw.last_mut().unwrap();
let cmb_raw = cmd_buf.raw.last_mut().unwrap();
unsafe {
cmb_raw.pipeline_barrier(
all_image_stages()..hal::pso::PipelineStage::TRANSFER,

View File

@ -8,7 +8,6 @@ use crate::{
};
use std::convert::TryInto;
use thiserror::Error;
pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
use hal::buffer::Usage as U;
@ -579,56 +578,38 @@ pub fn map_texture_dimension_size(
depth,
}: wgt::Extent3d,
sample_size: u32,
) -> Result<hal::image::Kind, MapTextureDimensionSizeError> {
) -> Result<hal::image::Kind, resource::TextureDimensionError> {
use hal::image::Kind as H;
use resource::TextureDimensionError as Tde;
use wgt::TextureDimension::*;
Ok(match dimension {
D1 => {
if height != 1 {
return Err(MapTextureDimensionSizeError::InvalidHeight);
return Err(Tde::InvalidHeight);
}
if sample_size != 1 {
return Err(MapTextureDimensionSizeError::InvalidSampleCount(
sample_size,
));
return Err(Tde::InvalidSampleCount(sample_size));
}
let layers = depth
.try_into()
.or(Err(MapTextureDimensionSizeError::TooManyLayers(depth)))?;
let layers = depth.try_into().or(Err(Tde::TooManyLayers(depth)))?;
H::D1(width, layers)
}
D2 => {
if sample_size > 32 || !is_power_of_two(sample_size) {
return Err(MapTextureDimensionSizeError::InvalidSampleCount(
sample_size,
));
return Err(Tde::InvalidSampleCount(sample_size));
}
let layers = depth
.try_into()
.or(Err(MapTextureDimensionSizeError::TooManyLayers(depth)))?;
let layers = depth.try_into().or(Err(Tde::TooManyLayers(depth)))?;
H::D2(width, height, layers, sample_size as u8)
}
D3 => {
if sample_size != 1 {
return Err(MapTextureDimensionSizeError::InvalidSampleCount(
sample_size,
));
return Err(Tde::InvalidSampleCount(sample_size));
}
H::D3(width, height, depth)
}
})
}
#[derive(Clone, Debug, Error)]
pub enum MapTextureDimensionSizeError {
#[error("too many layers ({0}) for texture array")]
TooManyLayers(u32),
#[error("1D textures must have height set to 1")]
InvalidHeight,
#[error("sample count {0} is invalid")]
InvalidSampleCount(u32),
}
pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind {
use hal::image::ViewKind as H;
use wgt::TextureViewDimension::*;

View File

@ -5,6 +5,7 @@
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
device::DeviceError,
hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
id, resource,
track::TrackerSet,
@ -14,7 +15,7 @@ use crate::{
use copyless::VecHelper as _;
use gfx_descriptor::{DescriptorAllocator, DescriptorSet};
use gfx_memory::{Heaps, MemoryBlock};
use hal::device::{Device as _, OomOrDeviceLost};
use hal::device::Device as _;
use parking_lot::Mutex;
use thiserror::Error;
@ -25,16 +26,16 @@ const CLEANUP_WAIT_MS: u64 = 5000;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
pub struct SuspectedResources {
pub(crate) buffers: Vec<id::BufferId>,
pub(crate) textures: Vec<id::TextureId>,
pub(crate) texture_views: Vec<id::TextureViewId>,
pub(crate) samplers: Vec<id::SamplerId>,
pub(crate) bind_groups: Vec<id::BindGroupId>,
pub(crate) compute_pipelines: Vec<id::ComputePipelineId>,
pub(crate) render_pipelines: Vec<id::RenderPipelineId>,
pub(crate) bind_group_layouts: Vec<id::BindGroupLayoutId>,
pub(crate) buffers: Vec<id::Valid<id::BufferId>>,
pub(crate) textures: Vec<id::Valid<id::TextureId>>,
pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>,
pub(crate) samplers: Vec<id::Valid<id::SamplerId>>,
pub(crate) bind_groups: Vec<id::Valid<id::BindGroupId>>,
pub(crate) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
pub(crate) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
pub(crate) render_bundles: Vec<id::RenderBundleId>,
pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
}
impl SuspectedResources {
@ -87,7 +88,7 @@ struct NonReferencedResources<B: hal::Backend> {
images: Vec<(B::Image, MemoryBlock<B>)>,
// Note: we keep the associated ID here in order to be able to check
// at any point what resources are used in a submission.
image_views: Vec<(id::TextureViewId, B::ImageView)>,
image_views: Vec<(id::Valid<id::TextureViewId>, B::ImageView)>,
samplers: Vec<B::Sampler>,
framebuffers: Vec<B::Framebuffer>,
desc_sets: Vec<DescriptorSet<B>>,
@ -182,13 +183,13 @@ struct ActiveSubmission<B: hal::Backend> {
index: SubmissionIndex,
fence: B::Fence,
last_resources: NonReferencedResources<B>,
mapped: Vec<id::BufferId>,
mapped: Vec<id::Valid<id::BufferId>>,
}
#[derive(Clone, Debug, Error)]
pub enum WaitIdleError {
#[error(transparent)]
OomOrDeviceLost(#[from] OomOrDeviceLost),
Device(#[from] DeviceError),
#[error("GPU got stuck :(")]
StuckGpu,
}
@ -219,7 +220,7 @@ pub(crate) struct LifetimeTracker<B: hal::Backend> {
/// Resources that are neither referenced nor used, just pending
/// actual deletion.
free_resources: NonReferencedResources<B>,
ready_to_map: Vec<id::BufferId>,
ready_to_map: Vec<id::Valid<id::BufferId>>,
}
impl<B: hal::Backend> LifetimeTracker<B> {
@ -263,22 +264,21 @@ impl<B: hal::Backend> LifetimeTracker<B> {
});
}
pub(crate) fn map(&mut self, buffer: id::BufferId, ref_count: RefCount) {
self.mapped.push(Stored {
value: buffer,
ref_count,
});
pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
self.mapped.push(Stored { value, ref_count });
}
fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
if !self.active.is_empty() {
tracing::debug!("Waiting for IDLE...");
let status = unsafe {
device.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
CLEANUP_WAIT_MS * 1_000_000,
)?
device
.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
CLEANUP_WAIT_MS * 1_000_000,
)
.map_err(DeviceError::from)?
};
tracing::debug!("...Done");
@ -353,9 +353,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
while let Some(id) = self.suspected_resources.render_bundles.pop() {
if trackers.bundles.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id)));
hub.render_bundles.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id.0)));
let res = hub.render_bundles.unregister_locked(id.0, &mut *guard);
self.suspected_resources.add_trackers(&res.used);
}
}
@ -368,9 +367,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
while let Some(id) = self.suspected_resources.bind_groups.pop() {
if trackers.bind_groups.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id)));
hub.bind_groups.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id.0)));
let res = hub.bind_groups.unregister_locked(id.0, &mut *guard);
self.suspected_resources.add_trackers(&res.used);
@ -392,9 +390,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.texture_views.drain(..) {
if trackers.views.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id)));
hub.texture_views.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id.0)));
let res = hub.texture_views.unregister_locked(id.0, &mut *guard);
let raw = match res.inner {
resource::TextureViewInner::Native { raw, source_id } => {
@ -422,9 +419,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.textures.drain(..) {
if trackers.textures.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id)));
hub.textures.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id.0)));
let res = hub.textures.unregister_locked(id.0, &mut *guard);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
@ -444,9 +440,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.samplers.drain(..) {
if trackers.samplers.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroySampler(id)));
hub.samplers.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroySampler(id.0)));
let res = hub.samplers.unregister_locked(id.0, &mut *guard);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
@ -466,9 +461,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.buffers.drain(..) {
if trackers.buffers.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id)));
hub.buffers.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id.0)));
let res = hub.buffers.unregister_locked(id.0, &mut *guard);
tracing::debug!("Buffer {:?} is detached", id);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
@ -489,9 +483,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.compute_pipelines.drain(..) {
if trackers.compute_pipes.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id)));
hub.compute_pipelines.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
let res = hub.compute_pipelines.unregister_locked(id.0, &mut *guard);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
@ -511,9 +504,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.render_pipelines.drain(..) {
if trackers.render_pipes.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id)));
hub.render_pipelines.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id.0)));
let res = hub.render_pipelines.unregister_locked(id.0, &mut *guard);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
@ -538,9 +530,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
//Note: this has to happen after all the suspected pipelines are destroyed
if ref_count.load() == 1 {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id)));
hub.pipeline_layouts.free_id(id);
let layout = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
let layout = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard);
for &bgl_id in layout.bind_group_layout_ids.iter() {
bgl_guard[bgl_id].multi_ref_count.dec();
}
@ -559,9 +551,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
//Note: nothing else can bump the refcount since the guard is locked exclusively
if guard[id].multi_ref_count.is_empty() {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id)));
hub.bind_group_layouts.free_id(id);
let layout = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
let layout = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard);
self.free_resources.descriptor_set_layouts.push(layout.raw);
}
}
@ -616,7 +607,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for &at in key.all() {
// If this attachment is still registered, it's still valid
if texture_view_guard.contains(at) {
if texture_view_guard.contains(at.0) {
continue;
}
@ -686,8 +677,9 @@ impl<B: GfxBackend> LifetimeTracker<B> {
{
buffer.map_state = resource::BufferMapState::Idle;
tracing::debug!("Mapping request is dropped because the buffer is destroyed.");
hub.buffers.free_id(buffer_id);
let buffer = buffer_guard.remove(buffer_id).unwrap();
let buffer = hub
.buffers
.unregister_locked(buffer_id.0, &mut *buffer_guard);
self.free_resources
.buffers
.push((buffer.raw, buffer.memory));

File diff suppressed because it is too large

View File

@ -5,16 +5,16 @@
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
command::{CommandAllocator, CommandBuffer, TextureCopyView, BITS_PER_BYTE},
command::{
texture_copy_view_to_hal, validate_linear_texture_data, validate_texture_copy_range,
CommandAllocator, CommandBuffer, TextureCopyView, TransferError, BITS_PER_BYTE,
},
conv,
device::WaitIdleError,
device::{DeviceError, WaitIdleError},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
resource::{BufferMapState, BufferUse, TextureUse},
resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
span,
validation::{
check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError,
},
};
use gfx_memory::{Block, Heaps, MemoryBlock};
@ -82,35 +82,33 @@ impl<B: hal::Backend> super::Device<B> {
self.pending_writes.command_buffer.as_mut().unwrap()
}
fn prepare_stage(
&mut self,
size: wgt::BufferAddress,
) -> Result<StagingData<B>, PrepareStageError> {
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<B>, DeviceError> {
let mut buffer = unsafe {
self.raw
.create_buffer(size, hal::buffer::Usage::TRANSFER_SRC)
.map_err(|err| match err {
hal::buffer::CreationError::OutOfMemory(_) => PrepareStageError::OutOfMemory,
hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to create staging buffer: {}", err),
})?
};
//TODO: do we need to transition into HOST_WRITE access first?
let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) };
let memory = self.mem_allocator.lock().allocate(
&self.raw,
&requirements,
gfx_memory::MemoryUsage::Staging { read_back: false },
gfx_memory::Kind::Linear,
)?;
let memory = self
.mem_allocator
.lock()
.allocate(
&self.raw,
&requirements,
gfx_memory::MemoryUsage::Staging { read_back: false },
gfx_memory::Kind::Linear,
)
.map_err(DeviceError::from_heaps)?;
unsafe {
self.raw.set_buffer_name(&mut buffer, "<write_buffer_temp>");
self.raw
.bind_buffer_memory(memory.memory(), memory.segment().offset, &mut buffer)
.map_err(|err| match err {
hal::device::BindError::OutOfMemory(_) => PrepareStageError::OutOfMemory,
_ => panic!("failed to bind buffer memory: {}", err),
})?;
.map_err(DeviceError::from_bind)?;
}
let cmdbuf = match self.pending_writes.command_buffer.take() {
@ -131,6 +129,28 @@ impl<B: hal::Backend> super::Device<B> {
}
}
#[derive(Clone, Debug, Error)]
pub enum QueueWriteError {
#[error(transparent)]
Queue(#[from] DeviceError),
#[error(transparent)]
Transfer(#[from] TransferError),
}
#[derive(Clone, Debug, Error)]
pub enum QueueSubmitError {
#[error(transparent)]
Queue(#[from] DeviceError),
#[error("command buffer {0:?} is invalid")]
InvalidCommandBuffer(id::CommandBufferId),
#[error(transparent)]
BufferAccess(#[from] BufferAccessError),
#[error("swap chain output was dropped before the command buffer got submitted")]
SwapChainOutputDropped,
#[error("GPU got stuck :(")]
StuckGpu,
}
//TODO: move out common parts of write_xxx.
impl<G: GlobalIdentityHandlerFactory> Global<G> {
@ -140,13 +160,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
buffer_id: id::BufferId,
buffer_offset: wgt::BufferAddress,
data: &[u8],
) -> Result<(), QueueWriteBufferError> {
) -> Result<(), QueueWriteError> {
span!(_guard, INFO, "Queue::write_buffer");
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let (buffer_guard, _) = hub.buffers.read(&mut token);
#[cfg(feature = "trace")]
@ -176,7 +198,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.memory
.map(&device.raw, hal::memory::Segment::ALL)
.map_err(|err| match err {
hal::device::MapError::OutOfMemory(_) => PrepareStageError::OutOfMemory,
hal::device::MapError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to map buffer: {}", err),
})?;
unsafe { mapped.write(&device.raw, hal::memory::Segment::ALL) }
@ -186,27 +208,23 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let mut trackers = device.trackers.lock();
let (dst, transition) =
trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST);
check_buffer_usage(dst.usage, wgt::BufferUsage::COPY_DST)?;
let (dst, transition) = trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)?;
}
dst.life_guard.use_at(device.active_submission_index + 1);
if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(QueueWriteBufferError::UnalignedDataSize(data_size));
Err(TransferError::UnalignedCopySize(data_size))?
}
if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(QueueWriteBufferError::UnalignedBufferOffset(buffer_offset));
Err(TransferError::UnalignedBufferOffset(buffer_offset))?
}
let destination_start_offset = buffer_offset;
let destination_end_offset = buffer_offset + data_size;
if destination_end_offset > dst.size {
return Err(QueueWriteBufferError::BufferOverrun {
start: destination_start_offset,
end: destination_end_offset,
size: dst.size,
});
if buffer_offset + data_size > dst.size {
Err(TransferError::BufferOverrun)?
}
let region = hal::command::BufferCopy {
@ -243,16 +261,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
data: &[u8],
data_layout: &wgt::TextureDataLayout,
size: &wgt::Extent3d,
) -> Result<(), QueueWriteTextureError> {
) -> Result<(), QueueWriteError> {
span!(_guard, INFO, "Queue::write_texture");
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let (texture_guard, _) = hub.textures.read(&mut token);
let (image_layers, image_range, image_offset) =
crate::command::texture_copy_view_to_hal(destination, size, &*texture_guard);
texture_copy_view_to_hal(destination, size, &*texture_guard)?;
#[cfg(feature = "trace")]
match device.trace {
@ -274,12 +294,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(());
}
let texture_format = texture_guard[destination.texture].format;
let texture_format = texture_guard.get(destination.texture).unwrap().format;
let bytes_per_block = conv::map_texture_format(texture_format, device.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
crate::command::validate_linear_texture_data(
validate_linear_texture_data(
data_layout,
texture_format,
data.len() as wgt::BufferAddress,
@ -302,18 +322,36 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let block_rows_in_copy = (size.depth - 1) * block_rows_per_image + height_blocks;
let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
let mut stage = device.prepare_stage(stage_size)?;
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers
.textures
.use_replace(
&*texture_guard,
destination.texture,
image_range,
TextureUse::COPY_DST,
)
.unwrap();
if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)?
}
validate_texture_copy_range(destination, dst.format, dst.kind, size)?;
dst.life_guard.use_at(device.active_submission_index + 1);
{
let mut mapped = stage
.memory
.map(&device.raw, hal::memory::Segment::ALL)
.map_err(|err| match err {
hal::device::MapError::OutOfMemory(_) => PrepareStageError::OutOfMemory,
hal::device::MapError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to map staging buffer: {}", err),
})?;
let mapping = unsafe { mapped.write(&device.raw, hal::memory::Segment::ALL) }
.expect("failed to get writer to mapped staging buffer");
if stage_bytes_per_row == data_layout.bytes_per_row {
// Unlikely case of the data already being aligned optimally.
// Fast path if the data is already aligned optimally.
mapping.slice[..stage_size as usize].copy_from_slice(data);
} else {
// Copy row by row into the optimal alignment.
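The row-by-row branch is cut off by the hunk; conceptually it repacks each tightly-packed source row into the staging buffer at the padded stride computed above. A minimal sketch of that loop under those assumptions, not the literal wgpu-core code:

```rust
/// Copy `rows` rows of `src_stride` bytes from a tightly-packed source
/// into `dst`, whose rows are `dst_stride` bytes apart; bytes beyond
/// `src_stride` in each destination row are left as padding.
fn repack_rows(src: &[u8], dst: &mut [u8], src_stride: usize, dst_stride: usize, rows: usize) {
    assert!(dst_stride >= src_stride);
    for row in 0..rows {
        let s = &src[row * src_stride..(row + 1) * src_stride];
        let d = &mut dst[row * dst_stride..row * dst_stride + src_stride];
        d.copy_from_slice(s);
    }
}

fn main() {
    let src = [1u8; 8]; // two rows of 4 bytes each
    let mut dst = [0u8; 16]; // two rows at a padded stride of 8 bytes
    repack_rows(&src, &mut dst, 4, 8, 2);
    assert_eq!(&dst[..4], &[1; 4]); // first row copied
    assert_eq!(&dst[4..8], &[0; 4]); // padding untouched
}
```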
@ -333,18 +371,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers.textures.use_replace(
&*texture_guard,
destination.texture,
image_range,
TextureUse::COPY_DST,
);
check_texture_usage(dst.usage, wgt::TextureUsage::COPY_DST)?;
crate::command::validate_texture_copy_range(destination, dst.format, dst.kind, size)?;
dst.life_guard.use_at(device.active_submission_index + 1);
let region = hal::command::BufferImageCopy {
buffer_offset: 0,
buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width,
@ -391,7 +417,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let callbacks = {
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let pending_write_command_buffer =
device
.pending_writes
@ -428,7 +456,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
let cmdbuf = &mut command_buffer_guard[cmb_id];
let cmdbuf = command_buffer_guard
.get_mut(cmb_id)
.map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
#[cfg(feature = "trace")]
match device.trace {
Some(ref trace) => trace.lock().add(Action::Submit(
@ -532,12 +562,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let fence = device
.raw
.create_fence(false)
.or(Err(QueueSubmitError::OutOfMemory))?;
.or(Err(DeviceError::OutOfMemory))?;
let submission = hal::queue::Submission {
command_buffers: pending_write_command_buffer.as_ref().into_iter().chain(
command_buffer_ids
.iter()
.flat_map(|&cmb_id| &command_buffer_guard[cmb_id].raw),
.flat_map(|&cmb_id| &command_buffer_guard.get(cmb_id).unwrap().raw),
),
wait_semaphores: Vec::new(),
signal_semaphores: signal_swapchain_semaphores
@ -557,7 +587,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.after_submit_internal(comb_raw, submit_index);
}
let callbacks = device.maintain(&hub, false, &mut token)?;
let callbacks = match device.maintain(&hub, false, &mut token) {
Ok(callbacks) => callbacks,
Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
};
super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
submit_index,
fence,
@ -580,54 +614,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
#[derive(Clone, Debug, Error)]
pub enum PrepareStageError {
#[error(transparent)]
Allocation(#[from] gfx_memory::HeapsError),
#[error("not enough memory left")]
OutOfMemory,
}
#[derive(Clone, Debug, Error)]
pub enum QueueWriteBufferError {
#[error("write buffer with indices {start}..{end} would overrun buffer of size {size}")]
BufferOverrun {
start: wgt::BufferAddress,
end: wgt::BufferAddress,
size: wgt::BufferAddress,
},
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error(transparent)]
Stage(#[from] PrepareStageError),
#[error("buffer offset {0} does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(wgt::BufferAddress),
#[error("buffer write size {0} does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedDataSize(wgt::BufferAddress),
}
#[derive(Clone, Debug, Error)]
pub enum QueueWriteTextureError {
#[error(transparent)]
MissingTextureUsage(#[from] MissingTextureUsageError),
#[error(transparent)]
Stage(#[from] PrepareStageError),
#[error(transparent)]
Transfer(#[from] crate::command::TransferError),
}
#[derive(Clone, Debug, Error)]
pub enum QueueSubmitError {
#[error(transparent)]
BufferAccess(#[from] super::BufferAccessError),
#[error("not enough memory left")]
OutOfMemory,
#[error("swap chain output was dropped before the command buffer got submitted")]
SwapChainOutputDropped,
#[error(transparent)]
WaitIdle(#[from] WaitIdleError),
}
fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
let gcd = if a >= b {
get_greatest_common_divisor(a, b)

View File

@ -10,7 +10,7 @@ use crate::{
id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
DeviceId, PipelineLayoutId, RenderBundleId, RenderPipelineId, SamplerId, ShaderModuleId,
SurfaceId, SwapChainId, TextureId, TextureViewId, TypedId,
SurfaceId, SwapChainId, TextureId, TextureViewId, TypedId, Valid,
},
instance::{Adapter, Instance, Surface},
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
@ -83,6 +83,9 @@ enum Element<T> {
Error(Epoch),
}
#[derive(Clone, Debug)]
pub(crate) struct InvalidId;
#[derive(Debug)]
pub struct Storage<T, I: TypedId> {
map: Vec<Element<T>>,
@ -90,38 +93,16 @@ pub struct Storage<T, I: TypedId> {
_phantom: PhantomData<I>,
}
impl<T, I: TypedId> ops::Index<I> for Storage<T, I> {
impl<T, I: TypedId> ops::Index<Valid<I>> for Storage<T, I> {
type Output = T;
fn index(&self, id: I) -> &T {
let (index, epoch, _) = id.unzip();
let (ref value, storage_epoch) = match self.map[index as usize] {
Element::Occupied(ref v, epoch) => (v, epoch),
Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
Element::Error(_) => panic!("{}[{}] is in error state", self.kind, index),
};
assert_eq!(
epoch, storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
value
fn index(&self, id: Valid<I>) -> &T {
self.get(id.0).unwrap()
}
}
impl<T, I: TypedId> ops::IndexMut<I> for Storage<T, I> {
fn index_mut(&mut self, id: I) -> &mut T {
let (index, epoch, _) = id.unzip();
let (value, storage_epoch) = match self.map[index as usize] {
Element::Occupied(ref mut v, epoch) => (v, epoch),
Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
Element::Error(_) => panic!("{}[{}] is in error state", self.kind, index),
};
assert_eq!(
epoch, storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
value
impl<T, I: TypedId> ops::IndexMut<Valid<I>> for Storage<T, I> {
fn index_mut(&mut self, id: Valid<I>) -> &mut T {
self.get_mut(id.0).unwrap()
}
}
@ -136,6 +117,40 @@ impl<T, I: TypedId> Storage<T, I> {
}
}
/// Get a reference to an item behind a potentially invalid ID.
/// Panics if there is an epoch mismatch, or the entry is empty.
pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> {
let (index, epoch, _) = id.unzip();
let (result, storage_epoch) = match self.map[index as usize] {
Element::Occupied(ref v, epoch) => (Ok(v), epoch),
Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
Element::Error(epoch) => (Err(InvalidId), epoch),
};
assert_eq!(
epoch, storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
result
}
/// Get a mutable reference to an item behind a potentially invalid ID.
/// Panics if there is an epoch mismatch, or the entry is empty.
pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> {
let (index, epoch, _) = id.unzip();
let (result, storage_epoch) = match self.map[index as usize] {
Element::Occupied(ref mut v, epoch) => (Ok(v), epoch),
Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
Element::Error(epoch) => (Err(InvalidId), epoch),
};
assert_eq!(
epoch, storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
result
}
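Together with the rewritten `Index` impls above, this is the heart of the PR: raw user-facing IDs must go through the fallible `get`/`get_mut`, while only the crate-internal `Valid` wrapper can index a storage directly. A condensed model of that relationship, with epochs and the vacant/error distinction elided:

```rust
use std::ops;

#[derive(Debug)]
struct InvalidId;

struct Valid<I>(I); // constructible only inside the crate in the real code

struct Storage<T> {
    map: Vec<Option<T>>, // None stands in for the Error element state
}

impl<T> Storage<T> {
    // Raw IDs: the caller must handle the error case.
    fn get(&self, id: usize) -> Result<&T, InvalidId> {
        self.map[id].as_ref().ok_or(InvalidId)
    }
}

impl<T> ops::Index<Valid<usize>> for Storage<T> {
    type Output = T;
    fn index(&self, id: Valid<usize>) -> &T {
        // A Valid ID was checked when it was created, so a failure here
        // would be an internal bug rather than a user error.
        self.get(id.0).unwrap()
    }
}

fn main() {
    let storage = Storage { map: vec![Some("buffer"), None] };
    assert!(storage.get(1).is_err()); // raw ID pointing at an error entry
    assert_eq!(storage[Valid(0)], "buffer"); // checked ID indexes directly
}
```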
fn insert_impl(&mut self, index: usize, element: Element<T>) {
if index >= self.map.len() {
self.map.resize_with(index + 1, || Element::Vacant);
@ -402,18 +417,16 @@ impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) {
(self.data.write(), Token::new())
}
}
impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
pub fn register_identity<A: Access<T>>(
pub(crate) fn register_identity<A: Access<T>>(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
value: T,
token: &mut Token<A>,
) -> I {
) -> Valid<I> {
let id = self.identity.process(id_in, self.backend);
self.register(id, value, token);
id
Valid(id)
}
pub fn register_error<A: Access<T>>(
@ -427,6 +440,13 @@ impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
id
}
pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> T {
let value = guard.remove(id).unwrap();
//Note: careful about the order here! The value must be removed before
//the ID is recycled, so a reused ID can never alias a live entry.
self.identity.free(id);
value
}
pub fn unregister<'a, A: Access<T>>(
&self,
id: I,
@ -438,6 +458,10 @@ impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
(value, Token::new())
}
pub fn process_id(&self, id_in: <F::Filter as IdentityHandler<I>>::Input) -> I {
self.identity.process(id_in, self.backend)
}
pub fn free_id(&self, id: I) {
self.identity.free(id)
}
@ -591,7 +615,9 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
for (index, element) in self.swap_chains.data.write().map.drain(..).enumerate() {
if let Element::Occupied(swap_chain, epoch) = element {
let device = &devices[swap_chain.device_id.value];
let surface = &mut surface_guard[TypedId::zip(index as Index, epoch, B::VARIANT)];
let surface = surface_guard
.get_mut(TypedId::zip(index as Index, epoch, B::VARIANT))
.unwrap();
let suf = B::get_surface_mut(surface);
unsafe {
device.raw.destroy_semaphore(swap_chain.semaphore);

View File

@ -53,8 +53,8 @@ impl<T> From<SerialId> for Id<T> {
impl<T> Id<T> {
#[cfg(test)]
pub(crate) fn dummy() -> Self {
Id(NonZeroU64::new(1).unwrap(), PhantomData)
pub(crate) fn dummy() -> Valid<Self> {
Valid(Id(NonZeroU64::new(1).unwrap(), PhantomData))
}
pub fn backend(self) -> Backend {
@ -110,6 +110,14 @@ impl<T> Ord for Id<T> {
}
}
/// An internal ID that has been checked to point to
/// a valid object in the storages.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub(crate) struct Valid<I>(pub I);
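Two details are doing the work here: `#[repr(transparent)]` guarantees `Valid<I>` has exactly the layout of `I`, so the wrapper costs nothing at runtime, and the `pub(crate)` visibility means code outside wgpu-core can never mint one, turning "this ID has been checked" into a compiler-enforced invariant. A toy illustration, using a module boundary to stand in for the crate boundary:

```rust
mod hub {
    // Same layout as the inner ID; the wrapper exists only in the type system.
    #[repr(transparent)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    pub struct Valid<I>(I); // private field; in wgpu the whole type is pub(crate)

    // The only way to obtain a Valid ID is to pass the check.
    pub fn check(raw: u32) -> Option<Valid<u32>> {
        if raw != 0 { Some(Valid(raw)) } else { None }
    }

    pub fn use_checked(id: Valid<u32>) -> u32 {
        id.0 // no re-validation needed past this point
    }
}

fn main() {
    // `hub::Valid(7)` would not compile here: the constructor is private.
    let id = hub::check(7).expect("nonzero IDs are valid in this toy model");
    assert_eq!(hub::use_checked(id), 7);
}
```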
pub trait TypedId {
fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
fn unzip(self) -> (Index, Epoch, Backend);

View File

@ -6,17 +6,12 @@ use crate::{
backend,
device::Device,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{AdapterId, DeviceId, SurfaceId},
id::{AdapterId, DeviceId, SurfaceId, Valid},
power, span, LifeGuard, PrivateFeatures, Stored, MAX_BIND_GROUPS,
};
use wgt::{Backend, BackendBit, DeviceDescriptor, PowerPreference, BIND_BUFFER_ALIGNMENT};
#[cfg(feature = "replay")]
use serde::Deserialize;
#[cfg(feature = "trace")]
use serde::Serialize;
use hal::{
adapter::{AdapterInfo as HalAdapterInfo, DeviceType as HalDeviceType, PhysicalDevice as _},
queue::QueueFamily as _,
@ -215,8 +210,8 @@ impl<B: hal::Backend> Adapter<B> {
/// Metadata about a backend adapter.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct AdapterInfo {
/// Adapter name
pub name: String,
@ -252,6 +247,8 @@ impl AdapterInfo {
#[derive(Clone, Debug, Error, PartialEq)]
/// Error when requesting a device from the adapter
pub enum RequestDeviceError {
#[error("parent adapter is invalid")]
InvalidAdapter,
#[error("connection to device was lost during initialization")]
DeviceLost,
#[error("device initialization failed due to implementation specific errors")]
@ -269,8 +266,8 @@ pub enum RequestDeviceError {
/// Supported physical device types.
#[repr(u8)]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum DeviceType {
/// Other.
Other,
@ -316,6 +313,18 @@ impl<I: Clone> AdapterInputs<'_, I> {
}
}
#[error("adapter is invalid")]
#[derive(Clone, Debug, Error)]
pub struct InvalidAdapter;
#[derive(Clone, Debug, Error)]
pub enum RequestAdapterError {
#[error("no suitable adapter found")]
NotFound,
#[error("surface {0:?} is invalid")]
InvalidSurface(SurfaceId),
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
#[cfg(feature = "raw-window-handle")]
pub fn instance_create_surface(
@ -347,7 +356,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
let mut token = Token::root();
self.surfaces.register_identity(id_in, surface, &mut token)
let id = self.surfaces.register_identity(id_in, surface, &mut token);
id.0
}
pub fn enumerate_adapters(&self, inputs: AdapterInputs<Input<G, AdapterId>>) -> Vec<AdapterId> {
@ -360,15 +370,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
backends_map! {
let map = |(instance_field, backend, backend_info, backend_hub)| {
if let Some(inst) = instance_field {
let hub = backend_hub(self);
if let Some(id_backend) = inputs.find(backend) {
for raw in inst.enumerate_adapters() {
let adapter = Adapter::new(raw);
tracing::info!("Adapter {} {:?}", backend_info, adapter.raw.info);
adapters.push(backend_hub(self).adapters.register_identity(
let id = hub.adapters.register_identity(
id_backend.clone(),
adapter,
&mut token,
));
);
adapters.push(id.0);
}
}
}
@ -387,17 +399,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
adapters
}
pub fn pick_adapter(
pub fn request_adapter(
&self,
desc: &RequestAdapterOptions,
inputs: AdapterInputs<Input<G, AdapterId>>,
) -> Option<AdapterId> {
) -> Result<AdapterId, RequestAdapterError> {
span!(_guard, INFO, "Instance::request_adapter");
let instance = &self.instance;
let mut token = Token::root();
let (surface_guard, mut token) = self.surfaces.read(&mut token);
let compatible_surface = desc.compatible_surface.map(|id| &surface_guard[id]);
let compatible_surface = desc
.compatible_surface
.map(|id| {
surface_guard
.get(id)
.map_err(|_| RequestAdapterError::InvalidSurface(id))
})
.transpose()?;
let mut device_types = Vec::new();
let mut id_vulkan = inputs.find(Backend::Vulkan);
@ -458,8 +477,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
if device_types.is_empty() {
tracing::warn!("No adapters are available!");
return None;
return Err(RequestAdapterError::NotFound);
}
let (mut integrated, mut discrete, mut virt, mut other) = (None, None, None, None);
@ -509,7 +527,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
adapter,
&mut token,
);
return Some(id);
return Ok(id.0);
}
selected -= adapters_backend.len();
};
@ -532,39 +550,52 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
id_dx11.take(),
);
tracing::warn!("Some adapters are present, but enumerating them failed!");
None
Err(RequestAdapterError::NotFound)
}
pub fn adapter_get_info<B: GfxBackend>(&self, adapter_id: AdapterId) -> AdapterInfo {
pub fn adapter_get_info<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<AdapterInfo, InvalidAdapter> {
span!(_guard, INFO, "Adapter::get_info");
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
AdapterInfo::from_gfx(adapter.raw.info.clone(), adapter_id.backend())
adapter_guard
.get(adapter_id)
.map(|adapter| AdapterInfo::from_gfx(adapter.raw.info.clone(), adapter_id.backend()))
.map_err(|_| InvalidAdapter)
}
pub fn adapter_features<B: GfxBackend>(&self, adapter_id: AdapterId) -> wgt::Features {
pub fn adapter_features<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::Features, InvalidAdapter> {
span!(_guard, INFO, "Adapter::features");
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
adapter.features
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.features)
.map_err(|_| InvalidAdapter)
}
pub fn adapter_limits<B: GfxBackend>(&self, adapter_id: AdapterId) -> wgt::Limits {
pub fn adapter_limits<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::Limits, InvalidAdapter> {
span!(_guard, INFO, "Adapter::limits");
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
adapter.limits.clone()
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.limits.clone())
.map_err(|_| InvalidAdapter)
}
pub fn adapter_destroy<B: GfxBackend>(&self, adapter_id: AdapterId) {
@ -572,18 +603,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self);
let mut token = Token::root();
let (mut guard, _) = hub.adapters.write(&mut token);
let (mut adapter_guard, _) = hub.adapters.write(&mut token);
if guard[adapter_id]
.life_guard
.ref_count
.take()
.unwrap()
.load()
== 1
{
hub.adapters.free_id(adapter_id);
let _adapter = guard.remove(adapter_id).unwrap();
match adapter_guard.get_mut(adapter_id) {
Ok(adapter) => {
if adapter.life_guard.ref_count.take().unwrap().load() == 1 {
hub.adapters
.unregister_locked(adapter_id, &mut *adapter_guard);
}
}
Err(_) => {
hub.adapters.free_id(adapter_id);
}
}
}
}
@ -602,7 +633,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let device = {
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
let adapter = adapter_guard
.get(adapter_id)
.map_err(|_| RequestDeviceError::InvalidAdapter)?;
let phd = &adapter.raw.physical_device;
// Verify all features were exposed by the adapter
@ -729,7 +762,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Device::new(
gpu.device,
Stored {
value: adapter_id,
value: Valid(adapter_id),
ref_count: adapter.life_guard.add_ref(),
},
gpu.queue_groups.swap_remove(0),
@ -742,6 +775,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.or(Err(RequestDeviceError::OutOfMemory))?
};
Ok(hub.devices.register_identity(id_in, device, &mut token))
let id = hub.devices.register_identity(id_in, device, &mut token);
Ok(id.0)
}
}

View File

@ -43,8 +43,6 @@ pub mod swap_chain;
mod track;
mod validation;
pub use hal::pso::read_spirv;
#[cfg(test)]
use loom::sync::atomic;
#[cfg(not(test))]
@ -198,7 +196,7 @@ impl LifeGuard {
#[derive(Clone, Debug)]
struct Stored<T> {
value: T,
value: id::Valid<T>,
ref_count: RefCount,
}
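
This `Stored<T>` change is the heart of the PR: hub storages are indexable only through the `Valid<I>` wrapper, so raw user IDs must pass a fallible `get` first, and public entry points peel the wrapper off with `.0` (as in the `Ok(id.0)` return earlier). A self-contained sketch of the idea, with simplified stand-ins for the types in `id.rs` and `hub.rs`:

```rust
use std::ops::Index;

// Simplified stand-ins; the real types live in id.rs and hub.rs.
#[derive(Clone, Copy, Debug)]
struct Valid<I>(I);

struct Storage<T> {
    items: Vec<Option<T>>, // `None` models an error slot
}

impl<T> Storage<T> {
    // Fallible entry point for raw, user-provided IDs.
    fn get(&self, id: usize) -> Result<&T, ()> {
        self.items.get(id).and_then(Option::as_ref).ok_or(())
    }
}

// Infallible indexing is only possible with an already-validated ID.
impl<T> Index<Valid<usize>> for Storage<T> {
    type Output = T;
    fn index(&self, id: Valid<usize>) -> &T {
        self.items[id.0].as_ref().unwrap()
    }
}

fn main() {
    let storage = Storage { items: vec![Some("buffer")] };
    let raw_id = 0usize;
    // Raw IDs must be checked once...
    storage.get(raw_id).expect("invalid id");
    // ...after which internal code can index with Valid directly.
    assert_eq!(storage[Valid(raw_id)], "buffer");
}
```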

View File

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
device::RenderPassContext,
device::{DeviceError, RenderPassContext},
id::{DeviceId, PipelineLayoutId, ShaderModuleId},
validation::StageError,
LifeGuard, RefCount, Stored,
@ -27,17 +27,27 @@ pub struct ShaderModule<B: hal::Backend> {
pub(crate) module: Option<naga::Module>,
}
#[derive(Clone, Debug, Error)]
pub enum CreateShaderModuleError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error(transparent)]
Validation(#[from] naga::proc::ValidationError),
}
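
The new error enums throughout this PR share the thiserror conventions visible here: `#[error(transparent)]` forwards `Display`/`source` to the wrapped error, and `#[from]` generates the `From` impl that `?` uses for promotion. A minimal compilable sketch with made-up names:

```rust
use thiserror::Error;

// Stand-in for the real device::DeviceError.
#[derive(Clone, Debug, Error)]
#[error("device is lost or out of memory")]
struct DeviceError;

#[derive(Clone, Debug, Error)]
enum CreateThingError {
    // `transparent` forwards Display/source to the inner error, and
    // `#[from]` generates `impl From<DeviceError> for CreateThingError`.
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("thing layout is invalid")]
    InvalidLayout,
}

fn check_device(ok: bool) -> Result<(), DeviceError> {
    if ok { Ok(()) } else { Err(DeviceError) }
}

fn create_thing(device_ok: bool) -> Result<(), CreateThingError> {
    check_device(device_ok)?; // DeviceError promoted via the generated From impl
    Ok(())
}

fn main() {
    assert!(create_thing(true).is_ok());
    assert!(matches!(
        create_thing(false),
        Err(CreateThingError::Device(_))
    ));
}
```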
pub type ProgrammableStageDescriptor<'a> = wgt::ProgrammableStageDescriptor<'a, ShaderModuleId>;
pub type ComputePipelineDescriptor<'a> =
wgt::ComputePipelineDescriptor<PipelineLayoutId, ProgrammableStageDescriptor<'a>>;
#[derive(Clone, Debug, Error)]
pub enum ComputePipelineError {
pub enum CreateComputePipelineError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("pipelie layout is invalid")]
InvalidLayout,
#[error(transparent)]
Stage(StageError),
#[error(transparent)]
HalCreationError(#[from] hal::pso::CreationError),
}
#[derive(Debug)]
@ -58,7 +68,11 @@ pub type RenderPipelineDescriptor<'a> =
wgt::RenderPipelineDescriptor<'a, PipelineLayoutId, ProgrammableStageDescriptor<'a>>;
#[derive(Clone, Debug, Error)]
pub enum RenderPipelineError {
pub enum CreateRenderPipelineError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("pipelie layout is invalid")]
InvalidLayout,
#[error("incompatible output format at index {index}")]
IncompatibleOutputFormat { index: u8 },
#[error("invalid sample count {0}")]
@ -72,8 +86,6 @@ pub enum RenderPipelineError {
},
#[error("missing required device features {0:?}")]
MissingFeature(wgt::Features),
#[error("not enough memory left")]
OutOfMemory,
#[error("error in stage {flag:?}: {error}")]
Stage {
flag: wgt::ShaderStage,

View File

@ -3,19 +3,22 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
device::DeviceError,
id::{DeviceId, SwapChainId, TextureId},
track::DUMMY_SELECTOR,
validation::MissingBufferUsageError,
LifeGuard, RefCount, Stored,
};
use gfx_memory::MemoryBlock;
use thiserror::Error;
use wgt::{BufferAddress, BufferUsage, TextureFormat, TextureUsage};
use std::{borrow::Borrow, ptr::NonNull};
bitflags::bitflags! {
/// The internal bitflags mirrored from `BufferUsage`. The values don't have to match!
pub (crate) struct BufferUse: u32 {
pub struct BufferUse: u32 {
const EMPTY = 0;
const MAP_READ = 1;
const MAP_WRITE = 2;
@ -42,7 +45,7 @@ bitflags::bitflags! {
bitflags::bitflags! {
/// The internal bitflags mirrored from `TextureUsage`. The values don't have to match!
pub(crate) struct TextureUse: u32 {
pub struct TextureUse: u32 {
const EMPTY = 0;
const COPY_SRC = 1;
const COPY_DST = 2;
@ -117,6 +120,33 @@ impl BufferMapOperation {
}
}
#[derive(Clone, Debug, Error)]
pub enum BufferAccessError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("buffer is invalid")]
InvalidBuffer,
#[error("buffer is already mapped")]
AlreadyMapped,
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("buffer is not mapped")]
NotMapped,
#[error("buffer map range does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedRange,
}
impl From<hal::device::MapError> for BufferAccessError {
fn from(error: hal::device::MapError) -> Self {
match error {
hal::device::MapError::OutOfMemory(_) => {
BufferAccessError::Device(DeviceError::OutOfMemory)
}
_ => panic!("failed to map buffer: {}", error),
}
}
}
#[derive(Debug)]
pub(crate) struct BufferPendingMapping {
pub sub_range: hal::buffer::SubRange,
@ -138,6 +168,18 @@ pub struct Buffer<B: hal::Backend> {
pub(crate) map_state: BufferMapState<B>,
}
#[derive(Clone, Debug, Error)]
pub enum CreateBufferError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("failed to map buffer while creating: {0}")]
AccessError(#[from] BufferAccessError),
#[error("buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")]
UnalignedSize,
#[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")]
UsageMismatch(BufferUsage),
}
impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()
@ -163,6 +205,30 @@ pub struct Texture<B: hal::Backend> {
pub(crate) life_guard: LifeGuard,
}
#[derive(Clone, Debug, Error)]
pub enum TextureDimensionError {
#[error("too many layers ({0}) for texture array")]
TooManyLayers(u32),
#[error("1D textures must have height set to 1")]
InvalidHeight,
#[error("sample count {0} is invalid")]
InvalidSampleCount(u32),
}
#[derive(Clone, Debug, Error)]
pub enum CreateTextureError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("D24Plus textures cannot be copied")]
CannotCopyD24Plus,
#[error(transparent)]
InvalidDimension(#[from] TextureDimensionError),
#[error("texture descriptor mip level count ({0}) must be less than `MAX_MIP_LEVELS`")]
InvalidMipLevelCount(u32),
#[error("Feature {0:?} must be enabled to create a texture of type {1:?}")]
MissingFeature(wgt::Features, TextureFormat),
}
impl<B: hal::Backend> Borrow<RefCount> for Texture<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()
@ -198,6 +264,26 @@ pub struct TextureView<B: hal::Backend> {
pub(crate) life_guard: LifeGuard,
}
#[derive(Clone, Debug, Error)]
pub enum CreateTextureViewError {
#[error("parent texture is invalid")]
InvalidTexture,
#[error("not enough memory left")]
OutOfMemory,
#[error(
"TextureView mip level count + base mip level {requested} must be <= Texture mip level count {total}"
)]
InvalidMipLevelCount { requested: u32, total: u8 },
#[error("TextureView array layer count + base array layer {requested} must be <= Texture depth/array layer count {total}")]
InvalidArrayLayerCount { requested: u32, total: u16 },
}
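
A hedged sketch of the bounds check the `InvalidMipLevelCount` variant reports; the helper below is hypothetical and simplified, not the actual validation code in `device/mod.rs`:

```rust
// Hypothetical validation helper mirroring the mip-level error above:
// requested = base level + level count, which must not exceed the total.
#[derive(Debug)]
struct InvalidMipLevelCount {
    requested: u32,
    total: u8,
}

fn check_mip_levels(base: u32, count: u32, total: u8) -> Result<(), InvalidMipLevelCount> {
    let requested = base + count;
    if requested > total as u32 {
        return Err(InvalidMipLevelCount { requested, total });
    }
    Ok(())
}

fn main() {
    assert!(check_mip_levels(0, 4, 8).is_ok());
    assert!(check_mip_levels(6, 4, 8).is_err()); // 10 > 8
}
```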
#[derive(Clone, Debug, Error)]
pub enum TextureViewDestroyError {
#[error("cannot destroy swap chain image")]
SwapChainImage,
}
impl<B: hal::Backend> Borrow<RefCount> for TextureView<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()
@ -219,6 +305,16 @@ pub struct Sampler<B: hal::Backend> {
pub(crate) comparison: bool,
}
#[derive(Clone, Debug, Error)]
pub enum CreateSamplerError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("invalid anisotropic clamp {0}, must be one of 1, 2, 4, 8 or 16")]
InvalidClamp(u8),
#[error("cannot create any more samplers")]
TooManyObjects,
}
impl<B: hal::Backend> Borrow<RefCount> for Sampler<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()

View File

@ -36,8 +36,9 @@
use crate::device::trace::Action;
use crate::{
conv,
device::DeviceError,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{DeviceId, SwapChainId, TextureViewId},
id::{DeviceId, SwapChainId, TextureViewId, Valid},
resource, span, LifeGuard, PrivateFeatures, Stored, SubmissionIndex,
};
@ -60,6 +61,35 @@ pub struct SwapChain<B: hal::Backend> {
pub(crate) active_submission_index: SubmissionIndex,
}
#[derive(Clone, Debug, Error)]
pub enum SwapChainError {
#[error("swap chain is invalid")]
Invalid,
#[error("parent surface is invalid")]
InvalidSurface,
#[error(transparent)]
Device(#[from] DeviceError),
#[error("swap chain image is already acquired")]
AlreadyAcquired,
}
#[derive(Clone, Debug, Error)]
pub enum CreateSwapChainError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("invalid surface")]
InvalidSurface,
#[error("`SwapChainOutput` must be dropped before a new `SwapChain` is made")]
SwapChainOutputExists,
#[error("surface does not support the adapter's queue family")]
UnsupportedQueueFamily,
#[error("requested format {requested:?} is not in list of supported formats: {available:?}")]
UnsupportedFormat {
requested: hal::format::Format,
available: Vec<hal::format::Format>,
},
}
pub(crate) fn swap_chain_descriptor_to_hal(
desc: &SwapChainDescriptor,
num_frames: u32,
@ -89,16 +119,20 @@ pub struct SwapChainOutput {
pub view_id: Option<TextureViewId>,
}
#[error("swap chain is invalid")]
#[derive(Clone, Debug, Error)]
pub struct InvalidSwapChain;
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn swap_chain_get_preferred_format<B: GfxBackend>(
&self,
_swap_chain_id: SwapChainId,
) -> wgt::TextureFormat {
) -> Result<wgt::TextureFormat, InvalidSwapChain> {
span!(_guard, INFO, "SwapChain::get_next_texture");
//TODO: we can query the formats as done in `device_create_swapchain`,
// but it's not clear which format in the list to return.
// For now, return `Bgra8UnormSrgb`, which is known to be supported everywhere.
wgt::TextureFormat::Bgra8UnormSrgb
Ok(wgt::TextureFormat::Bgra8UnormSrgb)
}
pub fn swap_chain_get_current_texture_view<B: GfxBackend>(
@ -112,10 +146,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let surface = surface_guard
.get_mut(swap_chain_id.to_surface_id())
.map_err(|_| SwapChainError::InvalidSurface)?;
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let sc = swap_chain_guard
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
#[cfg_attr(not(feature = "trace"), allow(unused_variables))]
let device = &device_guard[sc.device_id.value];
@ -126,16 +164,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Err(err) => (
None,
match err {
hal::window::AcquireError::OutOfMemory(_) => {
return Err(SwapChainError::OutOfMemory)
}
hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?,
hal::window::AcquireError::NotReady => unreachable!(), // we always set a timeout
hal::window::AcquireError::Timeout => SwapChainStatus::Timeout,
hal::window::AcquireError::OutOfDate => SwapChainStatus::Outdated,
hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => {
return Err(SwapChainError::DeviceLost)
}
hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?,
},
),
};
@ -146,7 +180,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: swap_chain_id,
value: Valid(swap_chain_id),
ref_count: sc.life_guard.add_ref(),
},
},
@ -179,7 +213,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
ref_count,
});
Some(id)
Some(id.0)
}
None => None,
};
@ -206,10 +240,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let surface = surface_guard
.get_mut(swap_chain_id.to_surface_id())
.map_err(|_| SwapChainError::InvalidSurface)?;
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let sc = swap_chain_guard
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
let device = &mut device_guard[sc.device_id.value];
#[cfg(feature = "trace")]
@ -222,7 +260,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.acquired_view_id
.take()
.ok_or(SwapChainError::AlreadyAcquired)?;
let (view, _) = hub.texture_views.unregister(view_id.value, &mut token);
let (view, _) = hub.texture_views.unregister(view_id.value.0, &mut token);
let image = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain { image, .. } => image,
@ -248,21 +286,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Ok(None) => Ok(SwapChainStatus::Good),
Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal),
Err(err) => match err {
hal::window::PresentError::OutOfMemory(_) => Err(SwapChainError::OutOfMemory),
hal::window::PresentError::OutOfMemory(_) => {
Err(SwapChainError::Device(DeviceError::OutOfMemory))
}
hal::window::PresentError::OutOfDate => Ok(SwapChainStatus::Outdated),
hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost),
hal::window::PresentError::DeviceLost(_) => Err(SwapChainError::DeviceLost),
hal::window::PresentError::DeviceLost(_) => {
Err(SwapChainError::Device(DeviceError::Lost))
}
},
}
}
}
#[derive(Clone, Debug, Error)]
pub enum SwapChainError {
#[error("swap chain image is already acquired")]
AlreadyAcquired,
#[error("Graphics backend is out of memory")]
OutOfMemory,
#[error("graphics backend device lost")]
DeviceLost,
}
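
One idiom in the acquire path above is easy to miss: `Err(DeviceError::OutOfMemory)?` converts through the generated `From` impl and returns early, while the sibling match arms still evaluate to a status value. A compilable stand-alone model of that shape (stand-in types, not the hal/wgpu definitions):

```rust
use thiserror::Error;

#[derive(Clone, Debug, Error)]
enum DeviceError {
    #[error("not enough memory left")]
    OutOfMemory,
}

#[derive(Clone, Debug, Error)]
enum SwapChainError {
    #[error(transparent)]
    Device(#[from] DeviceError),
}

#[derive(Debug)]
enum Status {
    Good,
    Timeout,
}

// Stand-in for hal's AcquireError.
enum AcquireError {
    OutOfMemory,
    Timeout,
}

fn acquire(result: Result<(), AcquireError>) -> Result<Status, SwapChainError> {
    let status = match result {
        Ok(()) => Status::Good,
        // `?` on an Err converts DeviceError -> SwapChainError and returns
        // early; the arm still type-checks as `Status` because it diverges.
        Err(AcquireError::OutOfMemory) => Err(DeviceError::OutOfMemory)?,
        Err(AcquireError::Timeout) => Status::Timeout,
    };
    Ok(status)
}

fn main() {
    assert!(matches!(acquire(Ok(())), Ok(Status::Good)));
    assert!(acquire(Err(AcquireError::OutOfMemory)).is_err());
}
```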

View File

@ -3,7 +3,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{PendingTransition, ResourceState, Unit};
use crate::{id::BufferId, resource::BufferUse};
use crate::{
id::{BufferId, Valid},
resource::BufferUse,
};
//TODO: store `hal::buffer::State` here to avoid extra conversions
pub(crate) type BufferState = Unit<BufferUse>;
@ -47,7 +50,7 @@ impl ResourceState for BufferState {
fn change(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
_selector: Self::Selector,
usage: Self::Usage,
output: Option<&mut Vec<PendingTransition<Self>>>,
@ -81,7 +84,7 @@ impl ResourceState for BufferState {
fn prepend(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
_selector: Self::Selector,
usage: Self::Usage,
) -> Result<(), PendingTransition<Self>> {
@ -100,7 +103,7 @@ impl ResourceState for BufferState {
fn merge(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
other: &Self,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {

View File

@ -9,13 +9,14 @@ mod texture;
use crate::{
conv,
hub::Storage,
id::{self, TypedId},
id::{self, TypedId, Valid},
resource, Epoch, FastHashMap, Index, RefCount,
};
use std::{
borrow::Borrow, collections::hash_map::Entry, fmt, marker::PhantomData, ops, vec::Drain,
};
use thiserror::Error;
pub(crate) use buffer::BufferState;
pub(crate) use texture::TextureState;
@ -45,7 +46,7 @@ impl<U: Copy> Unit<U> {
/// The main trait that abstracts away the tracking logic of
/// a particular resource type, like a buffer or a texture.
pub trait ResourceState: Clone + Default {
pub(crate) trait ResourceState: Clone + Default {
/// Corresponding `HUB` identifier.
type Id: Copy + fmt::Debug + TypedId;
/// A type specifying the sub-resources.
@ -74,7 +75,7 @@ pub trait ResourceState: Clone + Default {
/// be done for read-only usages.
fn change(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
output: Option<&mut Vec<PendingTransition<Self>>>,
@ -83,7 +84,7 @@ pub trait ResourceState: Clone + Default {
/// Sets up the first usage of the selected sub-resources.
fn prepend(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
) -> Result<(), PendingTransition<Self>>;
@ -98,7 +99,7 @@ pub trait ResourceState: Clone + Default {
/// the error is generated (returning the conflict).
fn merge(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
other: &Self,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>>;
@ -120,8 +121,8 @@ struct Resource<S> {
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
#[derive(Debug, PartialEq)]
pub struct PendingTransition<S: ResourceState> {
pub id: S::Id,
pub(crate) struct PendingTransition<S: ResourceState> {
pub id: Valid<S::Id>,
pub selector: S::Selector,
pub usage: ops::Range<S::Usage>,
}
@ -164,8 +165,16 @@ impl PendingTransition<TextureState> {
}
}
#[derive(Clone, Debug, Error)]
pub enum UseExtendError<U: fmt::Debug> {
#[error("resource is invalid")]
InvalidResource,
#[error("total usage {0:?} is not valid")]
Conflict(U),
}
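
`UseExtendError` lets the tracker's `use_extend` (changed further down) distinguish an ID pointing at an error slot from a genuine usage conflict. A sketch of a hypothetical caller branching on it; the enum is re-declared locally so the example stands alone:

```rust
use std::fmt;
use thiserror::Error;

// Local copy of the enum above, for a self-contained example.
#[derive(Clone, Debug, Error)]
enum UseExtendError<U: fmt::Debug> {
    #[error("resource is invalid")]
    InvalidResource,
    #[error("total usage {0:?} is not valid")]
    Conflict(U),
}

// Hypothetical caller: turn a tracker failure into a user-facing message.
fn explain<U: fmt::Debug>(err: UseExtendError<U>) -> String {
    match err {
        // The ID pointed at an error slot (e.g. a failed device_create_*).
        UseExtendError::InvalidResource => "resource id is invalid".to_string(),
        // The resource exists, but the combined usage is contradictory.
        UseExtendError::Conflict(usage) => format!("conflicting usage {:?}", usage),
    }
}

fn main() {
    let err: UseExtendError<u32> = UseExtendError::Conflict(0b11);
    println!("{}", explain(err));
}
```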
/// A tracker for all resources of a given type.
pub struct ResourceTracker<S: ResourceState> {
pub(crate) struct ResourceTracker<S: ResourceState> {
/// An association of known resource indices with their tracked states.
map: FastHashMap<Index, Resource<S>>,
/// Temporary storage for collecting transitions.
@ -195,8 +204,8 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Remove an id from the tracked map.
pub(crate) fn remove(&mut self, id: S::Id) -> bool {
let (index, epoch, backend) = id.unzip();
pub(crate) fn remove(&mut self, id: Valid<S::Id>) -> bool {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.remove(&index) {
Some(resource) => {
@ -208,8 +217,8 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Removes the resource from the tracker if we are holding the last reference.
pub(crate) fn remove_abandoned(&mut self, id: S::Id) -> bool {
let (index, epoch, backend) = id.unzip();
pub(crate) fn remove_abandoned(&mut self, id: Valid<S::Id>) -> bool {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.entry(index) {
Entry::Occupied(e) => {
@ -233,11 +242,11 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Return an iterator over the keys of used resources.
pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = S::Id> {
pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = Valid<S::Id>> {
let backend = self.backend;
self.map
.iter()
.map(move |(&index, resource)| S::Id::zip(index, resource.epoch, backend))
.map(move |(&index, resource)| Valid(S::Id::zip(index, resource.epoch, backend)))
}
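
`used()` may wrap everything it yields in `Valid` because entries only enter the map through checked paths, and the stored epoch lets the full ID be rebuilt. A simplified stand-alone model of that `zip`/rebuild round trip (real IDs also pack a backend discriminant):

```rust
use std::collections::HashMap;

// Simplified model: a real wgpu-core ID packs (index, epoch, backend).
#[derive(Clone, Copy, Debug, PartialEq)]
struct RawId(u64);

#[derive(Clone, Copy, Debug, PartialEq)]
struct Valid<I>(I);

fn zip(index: u32, epoch: u32) -> RawId {
    RawId(((epoch as u64) << 32) | index as u64)
}

fn unzip(id: RawId) -> (u32, u32) {
    (id.0 as u32, (id.0 >> 32) as u32)
}

fn main() {
    // index -> epoch, standing in for the tracker's FastHashMap<Index, Resource<S>>.
    let mut map = HashMap::new();
    map.insert(3u32, 7u32);

    // Rebuild full IDs from tracked entries, vouching for them with Valid.
    let used: Vec<Valid<RawId>> = map
        .iter()
        .map(|(&index, &epoch)| Valid(zip(index, epoch)))
        .collect();

    assert_eq!(unzip(used[0].0), (3, 7));
}
```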
/// Clear the tracked contents.
@ -248,8 +257,13 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Initialize a resource to be used.
///
/// Returns the existing state as an error if the resource is already registered.
pub(crate) fn init(&mut self, id: S::Id, ref_count: RefCount, state: S) -> Result<(), &S> {
let (index, epoch, backend) = id.unzip();
pub(crate) fn init(
&mut self,
id: Valid<S::Id>,
ref_count: RefCount,
state: S,
) -> Result<(), &S> {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.entry(index) {
Entry::Vacant(e) => {
@ -268,8 +282,8 @@ impl<S: ResourceState> ResourceTracker<S> {
///
/// Returns `Some(Usage)` only if this usage is consistent
/// across the given selector.
pub fn query(&self, id: S::Id, selector: S::Selector) -> Option<S::Usage> {
let (index, epoch, backend) = id.unzip();
pub fn query(&self, id: Valid<S::Id>, selector: S::Selector) -> Option<S::Usage> {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
let res = self.map.get(&index)?;
assert_eq!(res.epoch, epoch);
@ -281,10 +295,10 @@ impl<S: ResourceState> ResourceTracker<S> {
fn get_or_insert<'a>(
self_backend: wgt::Backend,
map: &'a mut FastHashMap<Index, Resource<S>>,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
) -> &'a mut Resource<S> {
let (index, epoch, backend) = id.unzip();
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(self_backend, backend);
match map.entry(index) {
Entry::Vacant(e) => e.insert(Resource {
@ -304,7 +318,7 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Returns the conflicting transition as an error.
pub(crate) fn change_extend(
&mut self,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
selector: S::Selector,
usage: S::Usage,
@ -317,7 +331,7 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Replace the usage of a specified resource.
pub(crate) fn change_replace(
&mut self,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
selector: S::Selector,
usage: S::Usage,
@ -334,7 +348,7 @@ impl<S: ResourceState> ResourceTracker<S> {
/// This is a special operation only used by the render pass attachments.
pub(crate) fn prepend(
&mut self,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
selector: S::Selector,
usage: S::Usage,
@ -355,7 +369,7 @@ impl<S: ResourceState> ResourceTracker<S> {
}
Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch);
let id = S::Id::zip(index, new.epoch, self.backend);
let id = Valid(S::Id::zip(index, new.epoch, self.backend));
e.into_mut().state.merge(id, &new.state, None)?;
}
}
@ -373,7 +387,7 @@ impl<S: ResourceState> ResourceTracker<S> {
}
Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch);
let id = S::Id::zip(index, new.epoch, self.backend);
let id = Valid(S::Id::zip(index, new.epoch, self.backend));
e.into_mut()
.state
.merge(id, &new.state, Some(&mut self.temp))
@ -395,11 +409,13 @@ impl<S: ResourceState> ResourceTracker<S> {
id: S::Id,
selector: S::Selector,
usage: S::Usage,
) -> Result<&'a T, S::Usage> {
let item = &storage[id];
self.change_extend(id, item.borrow(), selector, usage)
) -> Result<&'a T, UseExtendError<S::Usage>> {
let item = storage
.get(id)
.map_err(|_| UseExtendError::InvalidResource)?;
self.change_extend(Valid(id), item.borrow(), selector, usage)
.map(|()| item)
.map_err(|pending| pending.usage.start)
.map_err(|pending| UseExtendError::Conflict(pending.usage.end))
}
/// Use a given resource provided by an `Id` with the specified usage.
@ -412,10 +428,10 @@ impl<S: ResourceState> ResourceTracker<S> {
id: S::Id,
selector: S::Selector,
usage: S::Usage,
) -> (&'a T, Drain<PendingTransition<S>>) {
let item = &storage[id];
let drain = self.change_replace(id, item.borrow(), selector, usage);
(item, drain)
) -> Result<(&'a T, Drain<PendingTransition<S>>), S::Id> {
let item = storage.get(id).map_err(|_| id)?;
let drain = self.change_replace(Valid(id), item.borrow(), selector, usage);
Ok((item, drain))
}
}
@ -430,7 +446,7 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
fn change(
&mut self,
_id: Self::Id,
_id: Valid<Self::Id>,
_selector: Self::Selector,
_usage: Self::Usage,
_output: Option<&mut Vec<PendingTransition<Self>>>,
@ -440,7 +456,7 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
fn prepend(
&mut self,
_id: Self::Id,
_id: Valid<Self::Id>,
_selector: Self::Selector,
_usage: Self::Usage,
) -> Result<(), PendingTransition<Self>> {
@ -449,7 +465,7 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
fn merge(
&mut self,
_id: Self::Id,
_id: Valid<Self::Id>,
_other: &Self,
_output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {

View File

@ -3,7 +3,11 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{range::RangedStates, PendingTransition, ResourceState, Unit};
use crate::{device::MAX_MIP_LEVELS, id::TextureId, resource::TextureUse};
use crate::{
device::MAX_MIP_LEVELS,
id::{TextureId, Valid},
resource::TextureUse,
};
use arrayvec::ArrayVec;
@ -79,7 +83,7 @@ impl ResourceState for TextureState {
fn change(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
mut output: Option<&mut Vec<PendingTransition<Self>>>,
@ -138,7 +142,7 @@ impl ResourceState for TextureState {
fn prepend(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
) -> Result<(), PendingTransition<Self>> {
@ -173,7 +177,7 @@ impl ResourceState for TextureState {
fn merge(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
other: &Self,
mut output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {

View File

@ -84,6 +84,8 @@ pub enum InputError {
/// Errors produced when validating a programmable stage of a pipeline.
#[derive(Clone, Debug, Error)]
pub enum StageError {
#[error("shader module is invalid")]
InvalidModule,
#[error("unable to find an entry point matching the {0:?} execution model")]
MissingEntryPoint(naga::ShaderStage),
#[error("error matching global binding at index {binding} in set {set} against the pipeline layout: {error}")]