77: Storage indexing, framebuffer tracking, and a bunch of validation fixes r=grovesNL a=kvark

Closes #73
Closes #75
Closes #79

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
Co-authored-by: Dzmitry Malyshau <dmalyshau@mozilla.com>
This commit is contained in:
bors[bot] 2019-02-27 13:34:12 +00:00
commit 26bc6955f8
18 changed files with 425 additions and 394 deletions

View File

@ -48,7 +48,7 @@ clear:
rm wgpu-bindings/wgpu.h
lib-native: Cargo.lock wgpu-native/Cargo.toml $(wildcard wgpu-native/**/*.rs)
cargo build --manifest-path wgpu-native/Cargo.toml --features $(FEATURE_NATIVE)
cargo build --manifest-path wgpu-native/Cargo.toml --features "local,$(FEATURE_NATIVE)"
lib-rust: Cargo.lock wgpu-rs/Cargo.toml $(wildcard wgpu-rs/**/*.rs)
cargo build --manifest-path wgpu-rs/Cargo.toml --features $(FEATURE_RUST)

View File

@ -40,7 +40,7 @@ fn main() {
cull_mode: wgpu::CullMode::None,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: wgpu::MAX_DEPTH_BIAS_CLAMP,
depth_bias_clamp: 0.0,
},
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[

View File

@ -280,7 +280,7 @@ impl framework::Example for Example {
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: wgpu::MAX_DEPTH_BIAS_CLAMP,
depth_bias_clamp: 0.0,
},
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[

View File

@ -219,7 +219,7 @@ impl framework::Example for Example {
});
let local_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStageFlags::VERTEX | wgpu::ShaderStageFlags::FRAGMENT,
@ -380,7 +380,7 @@ impl framework::Example for Example {
let light_uniform_size = (Self::MAX_LIGHTS * mem::size_of::<LightRaw>()) as u32;
let light_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
size: light_uniform_size,
usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST,
usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST,
});
let vb_desc = wgpu::VertexBufferDescriptor {
@ -459,7 +459,7 @@ impl framework::Example for Example {
cull_mode: wgpu::CullMode::Back,
depth_bias: 2, // corresponds to bilinear filtering
depth_bias_slope_scale: 2.0,
depth_bias_clamp: wgpu::MAX_DEPTH_BIAS_CLAMP,
depth_bias_clamp: 0.0,
},
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[],
@ -579,7 +579,7 @@ impl framework::Example for Example {
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: wgpu::MAX_DEPTH_BIAS_CLAMP,
depth_bias_clamp: 0.0,
},
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[

View File

@ -5,8 +5,6 @@
#define WGPUBITS_PER_BYTE 8
#define WGPUMAX_DEPTH_BIAS_CLAMP 16
typedef enum {
WGPUAddressMode_ClampToEdge = 0,
WGPUAddressMode_Repeat = 1,
@ -169,6 +167,8 @@ typedef struct {
WGPUExtensions extensions;
} WGPUDeviceDescriptor;
typedef WGPUId WGPUBindGroupId;
typedef WGPUId WGPUBufferId;
typedef void (*WGPUBufferMapReadCallback)(WGPUBufferMapAsyncStatus status, const uint8_t *data, uint8_t *userdata);
@ -243,8 +243,6 @@ typedef struct {
const WGPURenderPassDepthStencilAttachmentDescriptor_TextureViewId *depth_stencil_attachment;
} WGPURenderPassDescriptor;
typedef WGPUId WGPUBindGroupId;
typedef WGPUId WGPUComputePipelineId;
typedef WGPUId WGPUInstanceId;
@ -557,6 +555,8 @@ typedef struct {
WGPUDeviceId wgpu_adapter_create_device(WGPUAdapterId adapter_id, const WGPUDeviceDescriptor *desc);
void wgpu_bind_group_destroy(WGPUBindGroupId bind_group_id);
void wgpu_buffer_destroy(WGPUBufferId buffer_id);
void wgpu_buffer_map_read_async(WGPUBufferId buffer_id,

View File

@ -1,6 +1,6 @@
use crate::command::bind::{Binder};
use crate::hub::HUB;
use crate::track::TrackerSet;
use crate::track::{Stitch, TrackerSet};
use crate::{
Stored, CommandBuffer,
BindGroupId, CommandBufferId, ComputePassId, ComputePipelineId,
@ -38,8 +38,7 @@ pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: ComputePassId) -> CommandB
HUB.command_buffers
.write()
.get_mut(pass.cmb_id.value)
.raw
[pass.cmb_id.value].raw
.push(pass.raw);
pass.cmb_id.value
}
@ -49,8 +48,7 @@ pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: ComputePassId, x: u32, y:
unsafe {
HUB.compute_passes
.write()
.get_mut(pass_id)
.raw
[pass_id].raw
.dispatch([x, y, z]);
}
}
@ -62,29 +60,27 @@ pub extern "C" fn wgpu_compute_pass_set_bind_group(
bind_group_id: BindGroupId,
) {
let mut pass_guard = HUB.compute_passes.write();
let pass = pass_guard.get_mut(pass_id);
let pass = &mut pass_guard[pass_id];
let bind_group_guard = HUB.bind_groups.read();
let bind_group = bind_group_guard.get(bind_group_id);
let bind_group = &bind_group_guard[bind_group_id];
//Note: currently, WebGPU compute passes have synchronization defined
// at a dispatch granularity, so we insert the necessary barriers here.
//TODO: have `TrackerSet::consume()` ?
CommandBuffer::insert_barriers(
&mut pass.raw,
pass.trackers.buffers.consume_by_replace(&bind_group.used.buffers),
pass.trackers.textures.consume_by_replace(&bind_group.used.textures),
&mut pass.trackers,
&bind_group.used,
Stitch::Last,
&*HUB.buffers.read(),
&*HUB.textures.read(),
);
pass.trackers.views.consume(&bind_group.used.views);
if let Some(pipeline_layout_id) = pass.binder.provide_entry(index as usize, bind_group_id, bind_group) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline_layout_id);
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
iter::once(&bind_group.raw),
&[],
@ -99,9 +95,9 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
pipeline_id: ComputePipelineId,
) {
let mut pass_guard = HUB.compute_passes.write();
let pass = pass_guard.get_mut(pass_id);
let pass = &mut pass_guard[pass_id];
let pipeline_guard = HUB.compute_pipelines.read();
let pipeline = pipeline_guard.get(pipeline_id);
let pipeline = &pipeline_guard[pipeline_id];
unsafe {
pass.raw.bind_compute_pipeline(&pipeline.raw);
@ -112,7 +108,7 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
}
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline.layout_id);
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bing_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
@ -124,12 +120,12 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
.enumerate()
{
if let Some(bg_id) = entry.expect_layout(bgl_id) {
let bind_group = bing_group_guard.get(bg_id);
let desc_set = &bing_group_guard[bg_id].raw;
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(&bind_group.raw),
iter::once(desc_set),
&[]
);
}

View File

@ -14,14 +14,15 @@ use crate::device::{
all_buffer_stages, all_image_stages,
};
use crate::hub::{HUB, Storage};
use crate::resource::TexturePlacement;
use crate::swap_chain::{SwapChainLink, SwapImageEpoch};
use crate::track::TrackerSet;
use crate::track::{DummyUsage, Stitch, TrackerSet};
use crate::conv;
use crate::{
BufferHandle, TextureHandle,
BufferId, CommandBufferId, CommandEncoderId, DeviceId,
TextureId, TextureViewId,
BufferUsageFlags, TextureUsageFlags, Color,
CommandBufferId, CommandEncoderId, DeviceId,
TextureViewId,
TextureUsageFlags, Color,
LifeGuard, Stored,
CommandBufferHandle,
};
@ -34,7 +35,6 @@ use hal::{Device as _Device};
use log::trace;
use std::collections::hash_map::Entry;
use std::ops::Range;
use std::{iter, slice};
use std::thread::ThreadId;
@ -90,39 +90,43 @@ pub struct CommandBuffer<B: hal::Backend> {
}
impl CommandBufferHandle {
pub(crate) fn insert_barriers<I, J>(
pub(crate) fn insert_barriers(
raw: &mut <Backend as hal::Backend>::CommandBuffer,
buffer_iter: I,
texture_iter: J,
base: &mut TrackerSet,
head: &TrackerSet,
stitch: Stitch,
buffer_guard: &Storage<BufferHandle>,
texture_guard: &Storage<TextureHandle>,
) where
I: Iterator<Item = (BufferId, Range<BufferUsageFlags>)>,
J: Iterator<Item = (TextureId, Range<TextureUsageFlags>)>,
{
let buffer_barriers = buffer_iter.map(|(id, transit)| {
let b = buffer_guard.get(id);
trace!("transit {:?} {:?}", id, transit);
hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(transit.start) .. conv::map_buffer_state(transit.end),
target: &b.raw,
range: None .. None,
families: None,
}
});
let texture_barriers = texture_iter.map(|(id, transit)| {
let t = texture_guard.get(id);
trace!("transit {:?} {:?}", id, transit);
let aspects = t.full_range.aspects;
hal::memory::Barrier::Image {
states: conv::map_texture_state(transit.start, aspects)
..conv::map_texture_state(transit.end, aspects),
target: &t.raw,
range: t.full_range.clone(), //TODO?
families: None,
}
});
) {
let buffer_barriers = base.buffers
.consume_by_replace(&head.buffers, stitch)
.map(|(id, transit)| {
let b = &buffer_guard[id];
trace!("transit buffer {:?} {:?}", id, transit);
hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(transit.start) .. conv::map_buffer_state(transit.end),
target: &b.raw,
range: None .. None,
families: None,
}
});
let texture_barriers = base.textures
.consume_by_replace(&head.textures, stitch)
.map(|(id, transit)| {
let t = &texture_guard[id];
trace!("transit texture {:?} {:?}", id, transit);
let aspects = t.full_range.aspects;
hal::memory::Barrier::Image {
states: conv::map_texture_state(transit.start, aspects)
..conv::map_texture_state(transit.end, aspects),
target: &t.raw,
range: t.full_range.clone(), //TODO?
families: None,
}
});
base.views
.consume_by_extend(&head.views)
.unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
@ -148,7 +152,7 @@ pub extern "C" fn wgpu_command_encoder_finish(
) -> CommandBufferId {
HUB.command_buffers
.write()
.get_mut(command_encoder_id)
[command_encoder_id]
.is_recording = false; //TODO: check for the old value
command_encoder_id
}
@ -158,9 +162,9 @@ pub fn command_encoder_begin_render_pass(
desc: RenderPassDescriptor,
) -> RenderPass<Backend> {
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(command_encoder_id);
let cmb = &mut cmb_guard[command_encoder_id];
let device_guard = HUB.devices.read();
let device = device_guard.get(cmb.device_id.value);
let device = &device_guard[cmb.device_id.value];
let view_guard = HUB.texture_views.read();
let mut current_comb = device.com_allocator.extend(cmb);
@ -187,13 +191,13 @@ pub fn command_encoder_begin_render_pass(
let swap_chain_links = &mut cmb.swap_chain_links;
let depth_stencil_key = depth_stencil_attachment.map(|at| {
let view = view_guard.get(at.attachment);
let view = &view_guard[at.attachment];
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
} else {
extent = Some(view.extent);
}
trackers.views.query(at.attachment, &view.life_guard.ref_count);
trackers.views.query(at.attachment, &view.life_guard.ref_count, DummyUsage);
let query = trackers.textures.query(
view.texture_id.value,
&view.texture_id.ref_count,
@ -213,20 +217,20 @@ pub fn command_encoder_begin_render_pass(
});
let color_keys = color_attachments.iter().map(|at| {
let view = view_guard.get(at.attachment);
let view = &view_guard[at.attachment];
if view.is_owned_by_swap_chain {
let link = match HUB.textures
.read()
.get(view.texture_id.value)
.swap_chain_link
[view.texture_id.value].placement
{
Some(ref link) => SwapChainLink {
TexturePlacement::SwapChain(ref link) => SwapChainLink {
swap_chain_id: link.swap_chain_id.clone(),
epoch: *link.epoch.lock(),
image_index: link.image_index,
},
None => unreachable!()
TexturePlacement::Memory(_) |
TexturePlacement::Void => unreachable!()
};
swap_chain_links.push(link);
}
@ -236,7 +240,7 @@ pub fn command_encoder_begin_render_pass(
} else {
extent = Some(view.extent);
}
trackers.views.query(at.attachment, &view.life_guard.ref_count);
trackers.views.query(at.attachment, &view.life_guard.ref_count, DummyUsage);
let query = trackers.textures.query(
view.texture_id.value,
&view.texture_id.ref_count,
@ -308,7 +312,7 @@ pub fn command_encoder_begin_render_pass(
.key()
.attachments
.iter()
.map(|&id| &view_guard.get(id).raw);
.map(|&id| &view_guard[id].raw);
unsafe {
device
@ -381,7 +385,7 @@ pub fn command_encoder_begin_compute_pass(
command_encoder_id: CommandEncoderId,
) -> ComputePass<Backend> {
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(command_encoder_id);
let cmb = &mut cmb_guard[command_encoder_id];
let raw = cmb.raw.pop().unwrap();
let stored = Stored {

View File

@ -1,7 +1,7 @@
use crate::command::bind::Binder;
use crate::hub::HUB;
use crate::resource::BufferUsageFlags;
use crate::track::TrackerSet;
use crate::track::{Stitch, TrackerSet};
use crate::{
CommandBuffer, Stored,
BindGroupId, BufferId, CommandBufferId, RenderPassId, RenderPipelineId,
@ -38,29 +38,24 @@ pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBuf
}
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(pass.cmb_id.value);
let cmb = &mut cmb_guard[pass.cmb_id.value];
match cmb.raw.last_mut() {
Some(ref mut last) => {
CommandBuffer::insert_barriers(
last,
cmb.trackers.buffers.consume_by_replace(&pass.trackers.buffers),
cmb.trackers.textures.consume_by_replace(&pass.trackers.textures),
&mut cmb.trackers,
&pass.trackers,
Stitch::Last,
&*HUB.buffers.read(),
&*HUB.textures.read(),
);
unsafe { last.finish() };
}
None => {
cmb.trackers.buffers
.consume_by_extend(&pass.trackers.buffers)
.unwrap();
cmb.trackers.textures
.consume_by_extend(&pass.trackers.textures)
.unwrap();
cmb.trackers.consume_by_extend(&pass.trackers);
}
}
cmb.trackers.views.consume(&pass.trackers.views);
cmb.raw.push(pass.raw);
pass.cmb_id.value
@ -73,7 +68,7 @@ pub extern "C" fn wgpu_render_pass_set_index_buffer(
let mut pass_guard = HUB.render_passes.write();
let buffer_guard = HUB.buffers.read();
let pass = pass_guard.get_mut(pass_id);
let pass = &mut pass_guard[pass_id];
let buffer = pass.trackers.buffers
.get_with_extended_usage(
&*buffer_guard,
@ -109,7 +104,7 @@ pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
slice::from_raw_parts(offset_ptr, count)
};
let pass = pass_guard.get_mut(pass_id);
let pass = &mut pass_guard[pass_id];
for &id in buffers {
pass.trackers.buffers
.get_with_extended_usage(
@ -122,7 +117,7 @@ pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
let buffers = buffers
.iter()
.map(|&id| &buffer_guard.get(id).raw)
.map(|&id| &buffer_guard[id].raw)
.zip(offsets.iter().map(|&off| off as u64));
unsafe {
@ -141,8 +136,7 @@ pub extern "C" fn wgpu_render_pass_draw(
unsafe {
HUB.render_passes
.write()
.get_mut(pass_id)
.raw
[pass_id].raw
.draw(
first_vertex .. first_vertex + vertex_count,
first_instance .. first_instance + instance_count,
@ -162,8 +156,7 @@ pub extern "C" fn wgpu_render_pass_draw_indexed(
unsafe {
HUB.render_passes
.write()
.get_mut(pass_id)
.raw
[pass_id].raw
.draw_indexed(
first_index .. first_index + index_count,
base_vertex,
@ -179,21 +172,15 @@ pub extern "C" fn wgpu_render_pass_set_bind_group(
bind_group_id: BindGroupId,
) {
let mut pass_guard = HUB.render_passes.write();
let pass = pass_guard.get_mut(pass_id);
let pass = &mut pass_guard[pass_id];
let bind_group_guard = HUB.bind_groups.read();
let bind_group = bind_group_guard.get(bind_group_id);
let bind_group = &bind_group_guard[bind_group_id];
pass.trackers.buffers
.consume_by_extend(&bind_group.used.buffers)
.unwrap();
pass.trackers.textures
.consume_by_extend(&bind_group.used.textures)
.unwrap();
pass.trackers.views.consume(&bind_group.used.views);
pass.trackers.consume_by_extend(&bind_group.used);
if let Some(pipeline_layout_id) = pass.binder.provide_entry(index as usize, bind_group_id, bind_group) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline_layout_id);
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
@ -211,9 +198,9 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
pipeline_id: RenderPipelineId,
) {
let mut pass_guard = HUB.render_passes.write();
let pass = pass_guard.get_mut(pass_id);
let pass = &mut pass_guard[pass_id];
let pipeline_guard = HUB.render_pipelines.read();
let pipeline = pipeline_guard.get(pipeline_id);
let pipeline = &pipeline_guard[pipeline_id];
unsafe {
pass.raw.bind_graphics_pipeline(&pipeline.raw);
@ -224,8 +211,8 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
}
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline.layout_id);
let bing_group_guard = HUB.bind_groups.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder.ensure_length(pipeline_layout.bind_group_layout_ids.len());
@ -236,12 +223,12 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
.enumerate()
{
if let Some(bg_id) = entry.expect_layout(bgl_id) {
let bind_group = bing_group_guard.get(bg_id);
let desc_set = &bind_group_guard[bg_id].raw;
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(&bind_group.raw),
iter::once(desc_set),
&[]
);
}

View File

@ -1,5 +1,6 @@
use crate::device::{all_buffer_stages, all_image_stages};
use crate::hub::HUB;
use crate::resource::TexturePlacement;
use crate::swap_chain::SwapChainLink;
use crate::conv;
use crate::{
@ -42,7 +43,7 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_buffer(
size: u32,
) {
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(command_buffer_id);
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let (src_buffer, src_usage) = cmb.trackers.buffers
@ -101,7 +102,7 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
copy_size: Extent3d,
) {
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(command_buffer_id);
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
@ -135,7 +136,7 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
range: dst_texture.full_range.clone(),
});
if let Some(ref link) = dst_texture.swap_chain_link {
if let TexturePlacement::SwapChain(ref link) = dst_texture.placement {
cmb.swap_chain_links.push(SwapChainLink {
swap_chain_id: link.swap_chain_id.clone(),
epoch: *link.epoch.lock(),
@ -184,7 +185,7 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
copy_size: Extent3d,
) {
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(command_buffer_id);
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
@ -203,7 +204,11 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
families: None,
range: src_texture.full_range.clone(),
});
assert!(src_texture.swap_chain_link.is_none()); //TODO
match src_texture.placement {
TexturePlacement::SwapChain(_) => unimplemented!(),
TexturePlacement::Void => unreachable!(),
TexturePlacement::Memory(_) => (),
}
let (dst_buffer, dst_usage) = cmb.trackers.buffers
.get_with_replaced_usage(
@ -260,7 +265,7 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_texture(
copy_size: Extent3d,
) {
let mut cmb_guard = HUB.command_buffers.write();
let cmb = cmb_guard.get_mut(command_buffer_id);
let cmb = &mut cmb_guard[command_buffer_id];
let texture_guard = HUB.textures.read();
let (src_texture, src_usage) = cmb.trackers.textures
@ -295,7 +300,7 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_texture(
range: dst_texture.full_range.clone(),
});
if let Some(ref link) = dst_texture.swap_chain_link {
if let TexturePlacement::SwapChain(ref link) = dst_texture.placement {
cmb.swap_chain_links.push(SwapChainLink {
swap_chain_id: link.swap_chain_id.clone(),
epoch: *link.epoch.lock(),

View File

@ -1,7 +1,6 @@
use crate::{
binding_model, command, pipeline, resource, Color,
Extent3d, Origin3d,
MAX_DEPTH_BIAS_CLAMP,
};
@ -491,7 +490,7 @@ pub fn map_rasterization_state_descriptor(
pipeline::FrontFace::Ccw => hal::pso::FrontFace::CounterClockwise,
pipeline::FrontFace::Cw => hal::pso::FrontFace::Clockwise,
},
depth_bias: if desc.depth_bias != 0 || desc.depth_bias_slope_scale != 0.0 || desc.depth_bias_clamp < MAX_DEPTH_BIAS_CLAMP {
depth_bias: if desc.depth_bias != 0 || desc.depth_bias_slope_scale != 0.0 || desc.depth_bias_clamp != 0.0 {
Some(hal::pso::State::Static(hal::pso::DepthBias {
const_factor: desc.depth_bias as f32,
slope_factor: desc.depth_bias_slope_scale,

View File

@ -1,18 +1,18 @@
use crate::{binding_model, command, conv, pipeline, resource, swap_chain};
use crate::hub::HUB;
use crate::track::{TrackerSet, TrackPermit};
use crate::track::{DummyUsage, Stitch, TrackerSet, TrackPermit};
use crate::{
LifeGuard, RefCount, Stored, SubmissionIndex,
BufferMapAsyncStatus, BufferMapOperation,
};
use crate::{
BufferId, CommandBufferId, AdapterId, DeviceId, QueueId,
TextureId, TextureViewId, SurfaceId,
BindGroupId, TextureId, TextureViewId, SurfaceId,
};
#[cfg(feature = "local")]
use crate::{
BindGroupId, BindGroupLayoutId, PipelineLayoutId, SamplerId, SwapChainId,
ShaderModuleId, CommandEncoderId, RenderPipelineId, ComputePipelineId,
BindGroupLayoutId, PipelineLayoutId, SamplerId, SwapChainId,
ShaderModuleId, CommandEncoderId, RenderPipelineId, ComputePipelineId,
};
use arrayvec::ArrayVec;
@ -64,7 +64,7 @@ pub(crate) struct RenderPassKey {
}
impl Eq for RenderPassKey {}
#[derive(Hash, PartialEq)]
#[derive(Clone, Hash, PartialEq)]
pub(crate) struct FramebufferKey {
pub attachments: AttachmentVec<TextureViewId>,
}
@ -77,16 +77,19 @@ enum ResourceId {
TextureView(TextureViewId),
}
enum Resource<B: hal::Backend> {
Buffer(resource::Buffer<B>),
Texture(resource::Texture<B>),
TextureView(resource::TextureView<B>),
enum NativeResource<B: hal::Backend> {
Buffer(B::Buffer, B::Memory),
Image(B::Image, B::Memory),
ImageView(B::ImageView),
Framebuffer(B::Framebuffer),
}
struct ActiveSubmission<B: hal::Backend> {
index: SubmissionIndex,
fence: B::Fence,
resources: Vec<Resource<B>>,
// Note: we keep the associated ID here in order to be able to check
// at any point what resources are used in a submission.
resources: Vec<(Option<ResourceId>, NativeResource<B>)>,
mapped: Vec<BufferId>,
}
@ -101,7 +104,7 @@ struct DestroyedResources<B: hal::Backend> {
active: Vec<ActiveSubmission<B>>,
/// Resources that are neither referenced or used, just pending
/// actual deletion.
free: Vec<Resource<B>>,
free: Vec<NativeResource<B>>,
ready_to_map: Vec<BufferId>,
}
@ -119,32 +122,37 @@ impl<B: hal::Backend> DestroyedResources<B> {
}
/// Returns the last submission index that is done.
fn cleanup(&mut self, raw: &B::Device) -> SubmissionIndex {
fn cleanup(&mut self, device: &B::Device) -> SubmissionIndex {
let mut last_done = 0;
for i in (0..self.active.len()).rev() {
if unsafe {
raw.get_fence_status(&self.active[i].fence).unwrap()
device.get_fence_status(&self.active[i].fence).unwrap()
} {
let a = self.active.swap_remove(i);
last_done = last_done.max(a.index);
self.free.extend(a.resources);
self.free.extend(a.resources.into_iter().map(|(_, r)| r));
unsafe {
raw.destroy_fence(a.fence);
device.destroy_fence(a.fence);
}
}
}
for resource in self.free.drain(..) {
match resource {
Resource::Buffer(buf) => unsafe {
raw.destroy_buffer(buf.raw)
NativeResource::Buffer(raw, memory) => unsafe {
device.destroy_buffer(raw);
device.free_memory(memory);
},
Resource::Texture(tex) => unsafe {
raw.destroy_image(tex.raw)
NativeResource::Image(raw, memory) => unsafe {
device.destroy_image(raw);
device.free_memory(memory);
},
Resource::TextureView(view) => unsafe {
raw.destroy_image_view(view.raw)
NativeResource::ImageView(raw) => unsafe {
device.destroy_image_view(raw);
},
NativeResource::Framebuffer(raw) => unsafe {
device.destroy_framebuffer(raw);
},
}
}
@ -167,32 +175,36 @@ impl DestroyedResources<back::Backend> {
if num_refs <= 3 {
let resource_id = self.referenced.swap_remove(i).0;
assert_eq!(num_refs, 3, "Resource {:?} misses some references", resource_id);
let (submit_index, resource) = match resource_id {
let (life_guard, resource) = match resource_id {
ResourceId::Buffer(id) => {
trackers.buffers.remove(id);
let buf = HUB.buffers.unregister(id);
let si = buf.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::Buffer(buf))
(buf.life_guard, NativeResource::Buffer(buf.raw, buf.memory))
}
ResourceId::Texture(id) => {
trackers.textures.remove(id);
let tex = HUB.textures.unregister(id);
let si = tex.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::Texture(tex))
let memory = match tex.placement {
// swapchain-owned images don't need explicit destruction
resource::TexturePlacement::SwapChain(_) => continue,
resource::TexturePlacement::Memory(mem) => mem,
resource::TexturePlacement::Void => unreachable!(),
};
(tex.life_guard, NativeResource::Image(tex.raw, memory))
}
ResourceId::TextureView(id) => {
trackers.views.remove(id);
let view = HUB.texture_views.unregister(id);
let si = view.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::TextureView(view))
(view.life_guard, NativeResource::ImageView(view.raw))
}
};
match self
.active
let submit_index = life_guard.submission_index.load(Ordering::Acquire);
match self.active
.iter_mut()
.find(|a| a.index == submit_index)
{
Some(a) => a.resources.push(resource),
Some(a) => a.resources.push((Some(resource_id), resource)),
None => self.free.push(resource),
}
}
@ -207,16 +219,51 @@ impl DestroyedResources<back::Backend> {
if num_refs <= 4 {
// assert_eq!(num_refs, 4);
let resource_id = self.mapped.swap_remove(i).value;
let buf = buffer_guard.get(resource_id);
let buf = &buffer_guard[resource_id];
let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
match self
.active
self.active
.iter_mut()
.find(|a| a.index == submit_index)
{
Some(a) => a.mapped.push(resource_id),
None => self.ready_to_map.push(resource_id),
.map_or(&mut self.ready_to_map, |a| &mut a.mapped)
.push(resource_id);
}
}
}
fn triage_framebuffers(
&mut self,
framebuffers: &mut FastHashMap<FramebufferKey, <back::Backend as hal::Backend>::Framebuffer>,
) {
let texture_view_guard = HUB.texture_views.read();
let remove_list = framebuffers
.keys()
.filter_map(|key| {
let mut last_submit: SubmissionIndex = 0;
for &at in &key.attachments {
if texture_view_guard.contains(at) {
return None
}
// This attachment is no longer registered.
// Let's see if it's used by any of the active submissions.
let res_id = &Some(ResourceId::TextureView(at));
for a in &self.active {
if a.resources.iter().any(|&(ref id, _)| id == res_id) {
last_submit = last_submit.max(a.index);
}
}
}
Some((key.clone(), last_submit))
})
.collect::<FastHashMap<_,_>>();
for (ref key, submit_index) in remove_list {
let resource = NativeResource::Framebuffer(framebuffers.remove(key).unwrap());
match self.active
.iter_mut()
.find(|a| a.index == submit_index)
{
Some(a) => a.resources.push((None, resource)),
None => self.free.push(resource),
}
}
}
@ -225,7 +272,7 @@ impl DestroyedResources<back::Backend> {
let mut buffer_guard = HUB.buffers.write();
for buffer_id in self.ready_to_map.drain(..) {
let buffer = buffer_guard.get_mut(buffer_id);
let buffer = &mut buffer_guard[buffer_id];
let mut operation = None;
std::mem::swap(&mut operation, &mut buffer.pending_map_operation);
match operation {
@ -363,7 +410,7 @@ pub fn device_create_buffer(
desc: &resource::BufferDescriptor,
) -> resource::Buffer<back::Backend> {
let device_guard = HUB.devices.read();
let device = &device_guard.get(device_id);
let device = &device_guard[device_id];
let (usage, memory_properties) = conv::map_buffer_usage(desc.usage);
let mut buffer = unsafe {
@ -418,8 +465,7 @@ pub fn device_track_buffer(
) {
let query = HUB.devices
.read()
.get(device_id)
.trackers
[device_id].trackers
.lock()
.buffers
.query(buffer_id, &ref_count, resource::BufferUsageFlags::empty());
@ -442,11 +488,10 @@ pub extern "C" fn wgpu_device_create_buffer(
#[no_mangle]
pub extern "C" fn wgpu_buffer_destroy(buffer_id: BufferId) {
let buffer_guard = HUB.buffers.read();
let buffer = buffer_guard.get(buffer_id);
let buffer = &buffer_guard[buffer_id];
HUB.devices
.read()
.get(buffer.device_id.value)
.destroyed
[buffer.device_id.value].destroyed
.lock()
.destroy(
ResourceId::Buffer(buffer_id),
@ -463,7 +508,7 @@ pub fn device_create_texture(
let aspects = format.surface_desc().aspects;
let usage = conv::map_texture_usage(desc.usage, aspects);
let device_guard = HUB.devices.read();
let device = &device_guard.get(device_id);
let device = &device_guard[device_id];
let mut image = unsafe {
device.raw.create_image(
@ -518,7 +563,7 @@ pub fn device_create_texture(
levels: 0 .. 1, //TODO: mips
layers: 0 .. desc.array_size as u16,
},
swap_chain_link: None,
placement: resource::TexturePlacement::Memory(memory),
life_guard: LifeGuard::new(),
}
}
@ -530,8 +575,7 @@ pub fn device_track_texture(
) {
let query = HUB.devices
.read()
.get(device_id)
.trackers
[device_id].trackers
.lock()
.textures
.query(texture_id, &ref_count, resource::TextureUsageFlags::UNINITIALIZED);
@ -553,26 +597,23 @@ pub extern "C" fn wgpu_device_create_texture(
pub fn texture_create_view(
texture_id: TextureId,
desc: &resource::TextureViewDescriptor,
format: resource::TextureFormat,
view_kind: hal::image::ViewKind,
range: hal::image::SubresourceRange,
) -> resource::TextureView<back::Backend> {
let texture_guard = HUB.textures.read();
let texture = texture_guard.get(texture_id);
let texture = &texture_guard[texture_id];
let raw = unsafe {
HUB.devices
.read()
.get(texture.device_id.value)
.raw
[texture.device_id.value].raw
.create_image_view(
&texture.raw,
conv::map_texture_view_dimension(desc.dimension),
conv::map_texture_format(desc.format),
view_kind,
conv::map_texture_format(format),
hal::format::Swizzle::NO,
hal::image::SubresourceRange {
aspects: conv::map_texture_aspect_flags(desc.aspect),
levels: desc.base_mip_level as u8 .. (desc.base_mip_level + desc.level_count) as u8,
layers: desc.base_array_layer as u16 .. (desc.base_array_layer + desc.array_count) as u16,
},
range,
)
.unwrap()
};
@ -598,17 +639,14 @@ pub fn device_track_view(
) {
let device_id = HUB.textures
.read()
.get(texture_id)
.device_id
.value;
let initialized = HUB.devices
[texture_id].device_id.value;
let query = HUB.devices
.read()
.get(device_id)
.trackers
[device_id].trackers
.lock()
.views
.query(view_id, &ref_count);
assert!(initialized);
.query(view_id, &ref_count, DummyUsage);
assert!(query.initialized);
}
#[cfg(feature = "local")]
@ -617,62 +655,43 @@ pub extern "C" fn wgpu_texture_create_view(
texture_id: TextureId,
desc: &resource::TextureViewDescriptor,
) -> TextureViewId {
let view = texture_create_view(texture_id, desc);
let texture_id = view.texture_id.value;
let view = texture_create_view(
texture_id,
desc.format,
conv::map_texture_view_dimension(desc.dimension),
hal::image::SubresourceRange {
aspects: conv::map_texture_aspect_flags(desc.aspect),
levels: desc.base_mip_level as u8 .. (desc.base_mip_level + desc.level_count) as u8,
layers: desc.base_array_layer as u16 .. (desc.base_array_layer + desc.array_count) as u16,
},
);
let ref_count = view.life_guard.ref_count.clone();
let id = HUB.texture_views.register_local(view);
device_track_view(texture_id, id, ref_count);
id
}
/// Build a texture view covering the full subresource range of `texture_id`,
/// inferring the view kind (1D/2D/array/3D) from the texture's image kind.
pub fn texture_create_default_view(
    texture_id: TextureId
) -> resource::TextureView<back::Backend> {
    let texture_guard = HUB.textures.read();
    let texture = texture_guard.get(texture_id);

    // Map the image kind to a view kind; a single layer/slice collapses the
    // array variants down to their non-array counterparts.
    let view_kind = match texture.kind {
        hal::image::Kind::D1(_, 1) => hal::image::ViewKind::D1,
        hal::image::Kind::D1(..) => hal::image::ViewKind::D1Array,
        hal::image::Kind::D2(_, _, 1, _) => hal::image::ViewKind::D2,
        hal::image::Kind::D2(..) => hal::image::ViewKind::D2Array,
        hal::image::Kind::D3(..) => hal::image::ViewKind::D3,
    };

    // Create the backend image view over the texture's entire range.
    let raw = unsafe{
        HUB.devices
            .read()
            .get(texture.device_id.value)
            .raw
            .create_image_view(
                &texture.raw,
                view_kind,
                conv::map_texture_format(texture.format),
                hal::format::Swizzle::NO,
                texture.full_range.clone(), // whole mip-level and layer range
            )
            .unwrap()
    };

    resource::TextureView {
        raw,
        texture_id: Stored {
            value: texture_id,
            // Cloning the ref-count keeps the texture alive while the view exists.
            ref_count: texture.life_guard.ref_count.clone(),
        },
        format: texture.format,
        extent: texture.kind.extent(),
        samples: texture.kind.num_samples(),
        is_owned_by_swap_chain: false,
        life_guard: LifeGuard::new(),
    }
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_texture_create_default_view(texture_id: TextureId) -> TextureViewId {
let view = texture_create_default_view(texture_id);
let texture_id = view.texture_id.value;
let (format, view_kind, range) = {
let texture_guard = HUB.textures.read();
let texture = &texture_guard[texture_id];
let view_kind = match texture.kind {
hal::image::Kind::D1(_, 1) => hal::image::ViewKind::D1,
hal::image::Kind::D1(..) => hal::image::ViewKind::D1Array,
hal::image::Kind::D2(_, _, 1, _) => hal::image::ViewKind::D2,
hal::image::Kind::D2(..) => hal::image::ViewKind::D2Array,
hal::image::Kind::D3(..) => hal::image::ViewKind::D3,
};
(texture.format, view_kind, texture.full_range.clone())
};
let view = texture_create_view(
texture_id,
format,
view_kind,
range,
);
let ref_count = view.life_guard.ref_count.clone();
let id = HUB.texture_views.register_local(view);
device_track_view(texture_id, id, ref_count);
@ -682,11 +701,10 @@ pub extern "C" fn wgpu_texture_create_default_view(texture_id: TextureId) -> Tex
#[no_mangle]
pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
let texture_guard = HUB.textures.read();
let texture = texture_guard.get(texture_id);
let texture = &texture_guard[texture_id];
HUB.devices
.read()
.get(texture.device_id.value)
.destroyed
[texture.device_id.value].destroyed
.lock()
.destroy(
ResourceId::Texture(texture_id),
@ -697,15 +715,13 @@ pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
#[no_mangle]
pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: TextureViewId) {
let texture_view_guard = HUB.texture_views.read();
let view = texture_view_guard.get(texture_view_id);
let view = &texture_view_guard[texture_view_id];
let device_id = HUB.textures
.read()
.get(view.texture_id.value)
.device_id.value;
[view.texture_id.value].device_id.value;
HUB.devices
.read()
.get(device_id)
.destroyed
[device_id].destroyed
.lock()
.destroy(
ResourceId::TextureView(texture_view_id),
@ -718,7 +734,7 @@ pub fn device_create_sampler(
device_id: DeviceId, desc: &resource::SamplerDescriptor
) -> resource::Sampler<back::Backend> {
let device_guard = HUB.devices.read();
let device = &device_guard.get(device_id);
let device = &device_guard[device_id];
let info = hal::image::SamplerInfo {
min_filter: conv::map_filter(desc.min_filter),
@ -773,8 +789,7 @@ pub fn device_create_bind_group_layout(
let raw = unsafe {
HUB.devices
.read()
.get(device_id)
.raw
[device_id].raw
.create_descriptor_set_layout(
bindings.iter().map(|binding| {
hal::pso::DescriptorSetLayoutBinding {
@ -815,14 +830,13 @@ pub fn device_create_pipeline_layout(
let bind_group_layout_guard = HUB.bind_group_layouts.read();
let descriptor_set_layouts = bind_group_layout_ids
.iter()
.map(|&id| &bind_group_layout_guard.get(id).raw);
.map(|&id| &bind_group_layout_guard[id].raw);
// TODO: push constants
let pipeline_layout = unsafe {
HUB.devices
.read()
.get(device_id)
.raw
[device_id].raw
.create_pipeline_layout(descriptor_set_layouts, &[])
}
.unwrap();
@ -851,9 +865,9 @@ pub fn device_create_bind_group(
desc: &binding_model::BindGroupDescriptor,
) -> binding_model::BindGroup<back::Backend> {
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let device = &device_guard[device_id];
let bind_group_layout_guard = HUB.bind_group_layouts.read();
let bind_group_layout = bind_group_layout_guard.get(desc.layout);
let bind_group_layout = &bind_group_layout_guard[desc.layout];
let bindings = unsafe {
slice::from_raw_parts(desc.bindings, desc.bindings_length as usize)
};
@ -886,12 +900,12 @@ pub fn device_create_bind_group(
hal::pso::Descriptor::Buffer(&buffer.raw, range)
}
binding_model::BindingResource::Sampler(id) => {
let sampler = sampler_guard.get(id);
let sampler = &sampler_guard[id];
hal::pso::Descriptor::Sampler(&sampler.raw)
}
binding_model::BindingResource::TextureView(id) => {
let view = texture_view_guard.get(id);
used.views.query(id, &view.life_guard.ref_count);
let view = &texture_view_guard[id];
used.views.query(id, &view.life_guard.ref_count, DummyUsage);
used.textures
.transit(
view.texture_id.value,
@ -934,6 +948,11 @@ pub extern "C" fn wgpu_device_create_bind_group(
HUB.bind_groups.register_local(bind_group)
}
/// C-callable entry point: removes the bind group from the global `HUB`
/// registry, releasing the registry's reference to it.
#[no_mangle]
pub extern "C" fn wgpu_bind_group_destroy(bind_group_id: BindGroupId) {
    HUB.bind_groups.unregister(bind_group_id);
}
pub fn device_create_shader_module(
device_id: DeviceId,
@ -943,8 +962,7 @@ pub fn device_create_shader_module(
let shader = unsafe {
HUB.devices
.read()
.get(device_id)
.raw
[device_id].raw
.create_shader_module(spv)
.unwrap()
};
@ -967,7 +985,7 @@ pub fn device_create_command_encoder(
_desc: &command::CommandEncoderDescriptor,
) -> command::CommandBuffer<back::Backend> {
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let device = &device_guard[device_id];
let dev_stored = Stored {
value: device_id,
@ -1005,7 +1023,7 @@ pub extern "C" fn wgpu_queue_submit(
command_buffer_count: usize,
) {
let mut device_guard = HUB.devices.write();
let device = device_guard.get_mut(queue_id);
let device = &mut device_guard[queue_id];
let mut swap_chain_links = Vec::new();
let command_buffer_ids =
@ -1024,26 +1042,25 @@ pub extern "C" fn wgpu_queue_submit(
let mut command_buffer_guard = HUB.command_buffers.write();
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let texture_view_guard = HUB.texture_views.read();
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
let comb = command_buffer_guard.get_mut(cmb_id);
let comb = &mut command_buffer_guard[cmb_id];
swap_chain_links.extend(comb.swap_chain_links.drain(..));
// update submission IDs
comb.life_guard.submission_index
.store(old_submit_index, Ordering::Release);
for id in comb.trackers.buffers.used() {
buffer_guard
.get(id)
.life_guard
.submission_index
buffer_guard[id].life_guard.submission_index
.store(old_submit_index, Ordering::Release);
}
for id in comb.trackers.textures.used() {
texture_guard
.get(id)
.life_guard
.submission_index
texture_guard[id].life_guard.submission_index
.store(old_submit_index, Ordering::Release);
}
for id in comb.trackers.views.used() {
texture_view_guard[id].life_guard.submission_index
.store(old_submit_index, Ordering::Release);
}
@ -1055,16 +1072,14 @@ pub extern "C" fn wgpu_queue_submit(
hal::command::CommandBufferInheritanceInfo::default(),
);
}
//TODO: fix the consume
let TrackerSet { ref mut buffers, ref mut textures, ref mut views } = *trackers;
command::CommandBuffer::insert_barriers(
&mut transit,
buffers.consume_by_replace(&comb.trackers.buffers),
textures.consume_by_replace(&comb.trackers.textures),
&mut *trackers,
&comb.trackers,
Stitch::Init,
&*buffer_guard,
&*texture_guard,
);
views.consume(&comb.trackers.views);
unsafe {
transit.finish();
}
@ -1085,9 +1100,7 @@ pub extern "C" fn wgpu_queue_submit(
.into_iter()
.flat_map(|link| {
//TODO: check the epoch
surface_guard
.get(link.swap_chain_id)
.swap_chain
surface_guard[link.swap_chain_id].swap_chain
.as_ref()
.map(|swap_chain| (
&swap_chain.frames[link.image_index as usize].sem_available,
@ -1100,7 +1113,7 @@ pub extern "C" fn wgpu_queue_submit(
//TODO: may `OneShot` be enough?
command_buffers: command_buffer_ids
.iter()
.flat_map(|&cmb_id| &command_buffer_guard.get(cmb_id).raw),
.flat_map(|&cmb_id| &command_buffer_guard[cmb_id].raw),
wait_semaphores,
signal_semaphores: &[], //TODO: signal `sem_present`?
};
@ -1115,6 +1128,7 @@ pub extern "C" fn wgpu_queue_submit(
let last_done = {
let mut destroyed = device.destroyed.lock();
destroyed.triage_referenced(&mut *trackers);
destroyed.triage_framebuffers(&mut *device.framebuffers.lock());
let last_done = destroyed.cleanup(&device.raw);
destroyed.handle_mapping(&device.raw);
@ -1145,9 +1159,9 @@ pub fn device_create_render_pipeline(
desc: &pipeline::RenderPipelineDescriptor,
) -> pipeline::RenderPipeline<back::Backend> {
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let device = &device_guard[device_id];
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let layout = &pipeline_layout_guard.get(desc.layout).raw;
let layout = &pipeline_layout_guard[desc.layout].raw;
let shader_module_guard = HUB.shader_modules.read();
let color_states = unsafe {
@ -1218,7 +1232,7 @@ pub fn device_create_render_pipeline(
.to_str()
.to_owned()
.unwrap(), // TODO
module: &shader_module_guard.get(desc.vertex_stage.module).raw,
module: &shader_module_guard[desc.vertex_stage.module].raw,
specialization: hal::pso::Specialization {
// TODO
constants: &[],
@ -1230,7 +1244,7 @@ pub fn device_create_render_pipeline(
.to_str()
.to_owned()
.unwrap(), // TODO
module: &shader_module_guard.get(desc.fragment_stage.module).raw,
module: &shader_module_guard[desc.fragment_stage.module].raw,
specialization: hal::pso::Specialization {
// TODO
constants: &[],
@ -1360,9 +1374,9 @@ pub fn device_create_compute_pipeline(
desc: &pipeline::ComputePipelineDescriptor,
) -> pipeline::ComputePipeline<back::Backend> {
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let device = &device_guard[device_id].raw;
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let layout = &pipeline_layout_guard.get(desc.layout).raw;
let layout = &pipeline_layout_guard[desc.layout].raw;
let pipeline_stage = &desc.compute_stage;
let shader_module_guard = HUB.shader_modules.read();
@ -1371,7 +1385,7 @@ pub fn device_create_compute_pipeline(
.to_str()
.to_owned()
.unwrap(), // TODO
module: &shader_module_guard.get(pipeline_stage.module).raw,
module: &shader_module_guard[pipeline_stage.module].raw,
specialization: hal::pso::Specialization {
// TODO
constants: &[],
@ -1392,7 +1406,7 @@ pub fn device_create_compute_pipeline(
};
let pipeline = unsafe {
device.raw
device
.create_compute_pipeline(&pipeline_desc, None)
.unwrap()
};
@ -1421,13 +1435,13 @@ pub fn device_create_swap_chain(
info!("creating swap chain {:?}", desc);
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let device = &device_guard[device_id];
let mut surface_guard = HUB.surfaces.write();
let surface = surface_guard.get_mut(surface_id);
let surface = &mut surface_guard[surface_id];
let (caps, formats, _present_modes, _composite_alphas) = {
let adapter_guard = HUB.adapters.read();
let adapter = adapter_guard.get(device.adapter_id);
let adapter = &adapter_guard[device.adapter_id];
assert!(surface.raw.supports_queue_family(&adapter.queue_families[0]));
surface.raw.compatibility(&adapter.physical_device)
};
@ -1521,7 +1535,7 @@ pub fn device_create_swap_chain(
levels: 0 .. 1,
layers: 0 .. 1,
},
swap_chain_link: None,
placement: resource::TexturePlacement::Void,
life_guard: LifeGuard::new(),
})
.collect()
@ -1533,13 +1547,11 @@ pub fn swap_chain_populate_textures(
textures: Vec<resource::Texture<back::Backend>>,
) {
let mut surface_guard = HUB.surfaces.write();
let swap_chain = surface_guard
.get_mut(swap_chain_id)
.swap_chain
let swap_chain = surface_guard[swap_chain_id].swap_chain
.as_mut()
.unwrap();
let device_guard = HUB.devices.read();
let device = device_guard.get(swap_chain.device_id.value);
let device = &device_guard[swap_chain.device_id.value];
let mut trackers = device.trackers.lock();
for (i, mut texture) in textures.into_iter().enumerate() {
@ -1557,7 +1569,7 @@ pub fn swap_chain_populate_textures(
)
.unwrap()
};
texture.swap_chain_link = Some(swap_chain::SwapChainLink {
texture.placement = resource::TexturePlacement::SwapChain(swap_chain::SwapChainLink {
swap_chain_id, //TODO: strongly
epoch: Mutex::new(0),
image_index: i as hal::SwapImageIndex,
@ -1588,6 +1600,7 @@ pub fn swap_chain_populate_textures(
trackers.views.query(
view_id.value,
&view_id.ref_count,
DummyUsage,
);
swap_chain.frames.push(swap_chain::Frame {
@ -1620,9 +1633,9 @@ pub extern "C" fn wgpu_buffer_set_sub_data(
start: u32, count: u32, data: *const u8,
) {
let buffer_guard = HUB.buffers.read();
let buffer = buffer_guard.get(buffer_id);
let buffer = &buffer_guard[buffer_id];
let mut device_guard = HUB.devices.write();
let device = device_guard.get_mut(buffer.device_id.value);
let device = &mut device_guard[buffer.device_id.value];
//Note: this is just doing `update_buffer`, which is limited to 64KB
@ -1646,7 +1659,12 @@ pub extern "C" fn wgpu_buffer_set_sub_data(
range: None .. None, //TODO: could be partial
});
// Note: this is not pretty. If we need one-time service command buffers,
// we'll need to have some internal abstractions for them to be safe.
let mut comb = device.com_allocator.allocate(buffer.device_id.clone(), &device.raw);
// mark as used by the next submission, conservatively
let last_submit_index = device.life_guard.submission_index.load(Ordering::Acquire);
comb.life_guard.submission_index.store(last_submit_index + 1, Ordering::Release);
unsafe {
let raw = comb.raw.last_mut().unwrap();
raw.begin(
@ -1692,15 +1710,14 @@ pub extern "C" fn wgpu_buffer_map_read_async(
start: u32, size: u32, callback: BufferMapReadCallback, userdata: *mut u8,
) {
let mut buffer_guard = HUB.buffers.write();
let buffer = buffer_guard.get_mut(buffer_id);
let device_guard = HUB.devices.read();
let device = device_guard.get(buffer.device_id.value);
let buffer = &mut buffer_guard[buffer_id];
let range = start as u64..(start + size) as u64;
buffer.pending_map_operation = Some(BufferMapOperation::Read(range, callback, userdata));
device
.destroyed
HUB.devices
.read()
[buffer.device_id.value].destroyed
.lock()
.map(buffer_id, buffer.life_guard.ref_count.clone());
}
@ -1711,15 +1728,14 @@ pub extern "C" fn wgpu_buffer_map_write_async(
start: u32, size: u32, callback: BufferMapWriteCallback, userdata: *mut u8,
) {
let mut buffer_guard = HUB.buffers.write();
let buffer = buffer_guard.get_mut(buffer_id);
let device_guard = HUB.devices.read();
let device = device_guard.get(buffer.device_id.value);
let buffer = &mut buffer_guard[buffer_id];
let range = start as u64..(start + size) as u64;
buffer.pending_map_operation = Some(BufferMapOperation::Write(range, callback, userdata));
device
.destroyed
HUB.devices
.read()
[buffer.device_id.value].destroyed
.lock()
.map(buffer_id, buffer.life_guard.ref_count.clone());
}
@ -1729,9 +1745,9 @@ pub extern "C" fn wgpu_buffer_unmap(
buffer_id: BufferId,
) {
let mut buffer_guard = HUB.buffers.write();
let buffer = buffer_guard.get_mut(buffer_id);
let mut device_guard = HUB.devices.write();
let device = device_guard.get_mut(buffer.device_id.value);
let buffer = &mut buffer_guard[buffer_id];
let device_guard = HUB.devices.read();
let device = &device_guard[buffer.device_id.value];
if !buffer.mapped_write_ranges.is_empty() {
unsafe { device.raw.flush_mapped_memory_ranges( buffer.mapped_write_ranges.iter().map(|r| {(&buffer.memory, r.clone())}) ).unwrap() }; // TODO
@ -1739,4 +1755,4 @@ pub extern "C" fn wgpu_buffer_unmap(
}
unsafe { device.raw.unmap_memory(&buffer.memory) };
}
}

View File

@ -83,19 +83,32 @@ pub struct Storage<T> {
map: VecMap<(T, Epoch)>,
}
impl<T> Storage<T> {
pub fn get(&self, id: Id) -> &T {
impl<T> ops::Index<Id> for Storage<T> {
type Output = T;
fn index(&self, id: Id) -> &T {
let (ref value, epoch) = self.map[id.0 as usize];
assert_eq!(epoch, id.1);
value
}
pub fn get_mut(&mut self, id: Id) -> &mut T {
}
impl<T> ops::IndexMut<Id> for Storage<T> {
fn index_mut(&mut self, id: Id) -> &mut T {
let (ref mut value, epoch) = self.map[id.0 as usize];
assert_eq!(epoch, id.1);
value
}
}
impl<T> Storage<T> {
pub fn contains(&self, id: Id) -> bool {
match self.map.get(id.0 as usize) {
Some(&(_, epoch)) if epoch == id.1 => true,
_ => false
}
}
}
pub struct Registry<T> {
#[cfg(feature = "local")]
identity: Mutex<IdentityManager>,
@ -132,9 +145,7 @@ impl<T> Registry<T> {
let old = self.data.write().map.insert(id.0 as usize, (value, id.1));
assert!(old.is_none());
}
}
impl<T> Registry<T> {
#[cfg(feature = "local")]
pub fn register_local(&self, value: T) -> Id {
let id = self.identity.lock().alloc();

View File

@ -58,7 +58,7 @@ pub extern "C" fn wgpu_instance_create_surface_from_winit(
) -> SurfaceId {
let raw = HUB.instances
.read()
.get(instance_id)
[instance_id]
.create_surface(window);
let surface = SurfaceHandle::new(raw);
HUB.surfaces.register_local(surface)
@ -76,7 +76,7 @@ pub fn instance_create_surface_from_xlib(
#[cfg(all(unix, feature = "gfx-backend-vulkan"))]
SurfaceHandle::new(HUB.instances
.read()
.get(instance_id)
[instance_id]
.create_surface_from_xlib(display, window)
)
}
@ -103,7 +103,7 @@ pub fn instance_create_surface_from_macos_layer(
#[cfg(feature = "gfx-backend-metal")]
SurfaceHandle::new(HUB.instances
.read()
.get(instance_id)
[instance_id]
.create_surface_from_layer(layer as *mut _)
)
}
@ -130,13 +130,13 @@ pub fn instance_create_surface_from_windows_hwnd(
#[cfg(any(feature = "gfx-backend-dx11", feature = "gfx-backend-dx12"))]
let raw = HUB.instances
.read()
.get(instance_id)
[instance_id]
.create_surface_from_hwnd(hwnd);
#[cfg(all(target_os = "windows", feature = "gfx-backend-vulkan"))]
let raw = HUB.instances
.read()
.get(instance_id)
[instance_id]
.create_surface_from_hwnd(hinstance, hwnd);
#[cfg_attr(not(target_os = "windows"), allow(unreachable_code))]
@ -159,7 +159,7 @@ pub fn instance_get_adapter(
desc: &AdapterDescriptor,
) -> AdapterHandle {
let instance_guard = HUB.instances.read();
let instance = instance_guard.get(instance_id);
let instance = &instance_guard[instance_id];
let (mut low, mut high, mut other) = (None, None, None);
for adapter in instance.enumerate_adapters() {
match adapter.info.device_type {
@ -190,8 +190,8 @@ pub fn adapter_create_device(
adapter_id: AdapterId,
_desc: &DeviceDescriptor,
) -> DeviceHandle {
let mut adapter_guard = HUB.adapters.write();
let adapter = adapter_guard.get_mut(adapter_id);
let adapter_guard = HUB.adapters.read();
let adapter = &adapter_guard[adapter_id];
let (raw, queue_group) = adapter.open_with::<_, hal::General>(1, |_qf| true).unwrap();
let mem_props = adapter.physical_device.memory_properties();
DeviceHandle::new(raw, adapter_id, queue_group, mem_props)

View File

@ -44,7 +44,6 @@ pub use self::hub::{HUB, Id, IdentityManager, Registry};
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};
pub const MAX_DEPTH_BIAS_CLAMP: f32 = 16.0;
type SubmissionIndex = usize;
//TODO: make it private. Currently used for swapchain creation impl.

View File

@ -105,13 +105,29 @@ pub struct TextureDescriptor {
pub usage: TextureUsageFlags,
}
/// Where a texture's backing storage lives.
pub(crate) enum TexturePlacement<B: hal::Backend> {
    /// The image belongs to a swap chain; the link carries an epoch counter
    /// that is bumped on acquire/present.
    SwapChain(SwapChainLink<Mutex<SwapImageEpoch>>),
    /// The image is backed by device memory owned by this texture.
    Memory(B::Memory),
    /// No backing recorded yet (used as a placeholder, e.g. before
    /// swap-chain textures are populated).
    Void,
}

impl<B: hal::Backend> TexturePlacement<B> {
    /// Returns the swap-chain link.
    ///
    /// Panics if the texture is not a swap-chain image.
    pub fn as_swap_chain(&self) -> &SwapChainLink<Mutex<SwapImageEpoch>> {
        match *self {
            TexturePlacement::SwapChain(ref link) => link,
            TexturePlacement::Memory(_) |
            TexturePlacement::Void => panic!("Expected swap chain link!"),
        }
    }
}
pub struct Texture<B: hal::Backend> {
pub(crate) raw: B::Image,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) kind: hal::image::Kind,
pub(crate) format: TextureFormat,
pub(crate) full_range: hal::image::SubresourceRange,
pub(crate) swap_chain_link: Option<SwapChainLink<Mutex<SwapImageEpoch>>>,
pub(crate) placement: TexturePlacement<B>,
pub(crate) life_guard: LifeGuard,
}

View File

@ -10,6 +10,7 @@ use crate::track::{TrackPermit};
use hal;
use hal::{Device as _Device, Swapchain as _Swapchain};
use log::{trace, warn};
use parking_lot::Mutex;
use std::{iter, mem};
@ -22,6 +23,12 @@ pub(crate) struct SwapChainLink<E> {
pub image_index: hal::SwapImageIndex,
}
impl SwapChainLink<Mutex<SwapImageEpoch>> {
    /// Advance the epoch counter by one, taking the lock for the increment.
    pub fn bump_epoch(&self) {
        *self.epoch.lock() += 1;
    }
}
pub struct Surface<B: hal::Backend> {
pub(crate) raw: B::Surface,
pub(crate) swap_chain: Option<SwapChain<B>>,
@ -95,8 +102,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(
let (image_index, device_id, descriptor) = {
let mut surface_guard = HUB.surfaces.write();
let swap_chain = surface_guard
.get_mut(swap_chain_id)
.swap_chain
[swap_chain_id].swap_chain
.as_mut()
.unwrap();
let sync = hal::FrameSync::Semaphore(&swap_chain.sem_available);
@ -120,8 +126,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(
let mut surface_guard = HUB.surfaces.write();
let swap_chain = surface_guard
.get_mut(swap_chain_id)
.swap_chain
[swap_chain_id].swap_chain
.as_mut()
.unwrap();
@ -134,7 +139,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(
};
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let device = &device_guard[device_id];
assert_ne!(swap_chain.acquired.len(), swap_chain.acquired.capacity(),
"Unable to acquire any more swap chain images before presenting");
@ -146,14 +151,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(
}
mem::swap(&mut frame.sem_available, &mut swap_chain.sem_available);
match HUB.textures
.read()
.get(frame.texture_id.value)
.swap_chain_link
{
Some(ref link) => *link.epoch.lock() += 1,
None => unreachable!(),
}
HUB.textures.read()[frame.texture_id.value].placement.as_swap_chain().bump_epoch();
SwapChainOutput {
texture_id: frame.texture_id.value,
@ -167,8 +165,7 @@ pub extern "C" fn wgpu_swap_chain_present(
) {
let mut surface_guard = HUB.surfaces.write();
let swap_chain = surface_guard
.get_mut(swap_chain_id)
.swap_chain
[swap_chain_id].swap_chain
.as_mut()
.unwrap();
@ -176,14 +173,11 @@ pub extern "C" fn wgpu_swap_chain_present(
let frame = &mut swap_chain.frames[image_index as usize];
let mut device_guard = HUB.devices.write();
let device = device_guard.get_mut(swap_chain.device_id.value);
let device = &mut device_guard[swap_chain.device_id.value];
let texture_guard = HUB.textures.read();
let texture = texture_guard.get(frame.texture_id.value);
match texture.swap_chain_link {
Some(ref link) => *link.epoch.lock() += 1,
None => unreachable!(),
}
let texture = &texture_guard[frame.texture_id.value];
texture.placement.as_swap_chain().bump_epoch();
//TODO: support for swapchain being sampled or read by the shader?

View File

@ -56,6 +56,15 @@ bitflags! {
pub trait GenericUsage {
fn is_exclusive(&self) -> bool;
}
/// Zero-sized stand-in usage for trackers that need a usage type parameter
/// but have no real state transitions to record. Or-ing two dummies is a
/// no-op that yields another dummy.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct DummyUsage;

impl BitOr for DummyUsage {
    type Output = DummyUsage;

    fn bitor(self, _rhs: Self) -> Self::Output {
        // There is only one possible value, so the combination is trivial.
        DummyUsage
    }
}
impl GenericUsage for BufferUsageFlags {
fn is_exclusive(&self) -> bool {
BufferUsageFlags::WRITE_ALL.intersects(*self)
@ -66,6 +75,11 @@ impl GenericUsage for TextureUsageFlags {
TextureUsageFlags::WRITE_ALL.intersects(*self)
}
}
/// A dummy usage carries no state, so it is never exclusive.
impl GenericUsage for DummyUsage {
    fn is_exclusive(&self) -> bool {
        false
    }
}
#[derive(Clone)]
struct Track<U> {
@ -82,11 +96,17 @@ pub struct Tracker<I, U> {
}
pub type BufferTracker = Tracker<BufferId, BufferUsageFlags>;
pub type TextureTracker = Tracker<TextureId, TextureUsageFlags>;
pub struct DummyTracker<I> {
map: FastHashMap<Index, (RefCount, Epoch)>,
_phantom: PhantomData<I>,
pub type TextureViewTracker = Tracker<TextureViewId, DummyUsage>;
//TODO: make this a generic parameter.
/// Mode of stitching two states together.
#[derive(Clone, Copy, Debug)]
pub enum Stitch {
    /// Stitch to the init state of the other resource.
    Init,
    /// Stitch to the last state of the other resource.
    Last,
}
pub type TextureViewTracker = DummyTracker<TextureViewId>;
pub struct TrackerSet {
pub buffers: BufferTracker,
@ -103,46 +123,17 @@ impl TrackerSet {
views: TextureViewTracker::new(),
}
}
}
impl<I: NewId> DummyTracker<I> {
pub fn new() -> Self {
DummyTracker {
map: FastHashMap::default(),
_phantom: PhantomData,
}
}
/// Remove an id from the tracked map.
pub(crate) fn remove(&mut self, id: I) -> bool {
match self.map.remove(&id.index()) {
Some((_, epoch)) => {
assert_eq!(epoch, id.epoch());
true
}
None => false,
}
}
/// Get the last usage on a resource.
pub(crate) fn query(&mut self, id: I, ref_count: &RefCount) -> bool {
match self.map.entry(id.index()) {
Entry::Vacant(e) => {
e.insert((ref_count.clone(), id.epoch()));
true
}
Entry::Occupied(e) => {
assert_eq!(e.get().1, id.epoch());
false
}
}
}
/// Consume another tracker.
pub fn consume(&mut self, other: &Self) {
for (&index, &(ref ref_count, epoch)) in &other.map {
self.query(I::new(index, epoch), ref_count);
}
pub fn consume_by_extend(&mut self, other: &Self) {
self.buffers
.consume_by_extend(&other.buffers)
.unwrap();
self.textures
.consume_by_extend(&other.textures)
.unwrap();
self.views
.consume_by_extend(&other.views)
.unwrap();
}
}
@ -228,7 +219,11 @@ impl<I: NewId, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<I
/// Consume another tracker, adding its transitions to `self`.
/// Transitions the current usage to the new one.
pub fn consume_by_replace<'a>(&'a mut self, other: &'a Self) -> impl 'a + Iterator<Item = (I, Range<U>)> {
pub fn consume_by_replace<'a>(
&'a mut self,
other: &'a Self,
stitch: Stitch,
) -> impl 'a + Iterator<Item = (I, Range<U>)> {
other.map.iter().flat_map(move |(&index, new)| {
match self.map.entry(index) {
Entry::Vacant(e) => {
@ -241,7 +236,11 @@ impl<I: NewId, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<I
if old == new.init {
None
} else {
Some((I::new(index, new.epoch), old .. new.last))
let state = match stitch {
Stitch::Init => new.init,
Stitch::Last => new.last,
};
Some((I::new(index, new.epoch), old .. state))
}
}
}
@ -287,7 +286,7 @@ impl<U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<Id, U> {
usage: U,
permit: TrackPermit,
) -> Result<(&'a T, Tracktion<U>), U> {
let item = storage.get(id);
let item = &storage[id];
self.transit(id, item.borrow(), usage, permit)
.map(|tracktion| (item, tracktion))
}
@ -298,7 +297,7 @@ impl<U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<Id, U> {
id: Id,
usage: U,
) -> Result<&'a T, U> {
let item = storage.get(id);
let item = &storage[id];
self.transit(id, item.borrow(), usage, TrackPermit::EXTEND)
.map(|_tracktion| item)
}
@ -309,7 +308,7 @@ impl<U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<Id, U> {
id: Id,
usage: U,
) -> Result<(&'a T, Option<U>), U> {
let item = storage.get(id);
let item = &storage[id];
self.transit(id, item.borrow(), usage, TrackPermit::REPLACE)
.map(|tracktion| (item, match tracktion {
Tracktion::Init |

View File

@ -10,7 +10,6 @@ use std::slice;
pub use wgn::winit;
pub use wgn::{
MAX_DEPTH_BIAS_CLAMP,
AdapterDescriptor, BindGroupLayoutBinding, BindingType,
BlendDescriptor, BlendOperation, BlendFactor, BufferMapAsyncStatus, ColorWriteFlags,
RasterizationStateDescriptor, CullMode, FrontFace,
@ -83,6 +82,12 @@ pub struct BindGroup {
id: wgn::BindGroupId,
}
impl Drop for BindGroup {
    /// Ask wgpu-native to destroy the underlying bind group when this
    /// handle is dropped.
    fn drop(&mut self) {
        wgn::wgpu_bind_group_destroy(self.id);
    }
}
pub struct ShaderModule {
id: wgn::ShaderModuleId,
}
@ -247,7 +252,7 @@ impl Instance {
id: wgn::wgpu_instance_create_surface_from_winit(self.id, window),
}
}
#[cfg(feature = "metal")]
pub fn create_surface_with_metal_layer(&self, window: *mut std::ffi::c_void) -> Surface {
Surface {