237: Enforce the HUB lock order r=grovesNL a=kvark

Fixes #66
cc @jrmuizel @m4b

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
This commit is contained in:
bors[bot] 2019-07-05 03:27:11 +00:00
commit 183058d618
11 changed files with 531 additions and 256 deletions

View File

@ -1,3 +1,4 @@
newline_style = "Native"
blank_lines_upper_bound = 2
spaces_around_ranges = true
imports_layout = "HorizontalVertical"

View File

@ -1,6 +1,6 @@
use crate::{
command::bind::{Binder, LayoutChange},
hub::HUB,
hub::{HUB, Token},
track::{Stitch, TrackerSet},
BindGroupId,
BufferAddress,
@ -44,8 +44,9 @@ impl<B: hal::Backend> ComputePass<B> {
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: ComputePassId) -> CommandBufferId {
let mut cmb_guard = HUB.command_buffers.write();
let pass = HUB.compute_passes.unregister(pass_id);
let mut token = Token::root();
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let (pass, _) = HUB.compute_passes.unregister(pass_id, &mut token);
let cmb = &mut cmb_guard[pass.cmb_id.value];
// There are no transitions to be made: we've already been inserting barriers
@ -63,9 +64,11 @@ pub extern "C" fn wgpu_compute_pass_set_bind_group(
offsets: *const BufferAddress,
offsets_length: usize,
) {
let mut pass_guard = HUB.compute_passes.write();
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = HUB.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = HUB.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = HUB.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group_guard = HUB.bind_groups.read();
let bind_group = pass
.trackers
@ -94,21 +97,22 @@ pub extern "C" fn wgpu_compute_pass_set_bind_group(
//Note: currently, WebGPU compute passes have synchronization defined
// at a dispatch granularity, so we insert the necessary barriers here.
let (buffer_guard, mut token) = HUB.buffers.read(&mut token);
let (texture_guard, _) = HUB.textures.read(&mut token);
CommandBuffer::insert_barriers(
&mut pass.raw,
&mut pass.trackers,
&bind_group.used,
Stitch::Last,
&*HUB.buffers.read(),
&*HUB.textures.read(),
&*buffer_guard,
&*texture_guard,
);
if let Some((pipeline_layout_id, follow_up)) =
pass.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let bind_groups =
iter::once(bind_group.raw.raw()).chain(follow_up.map(|bg_id| bind_group_guard[bg_id].raw.raw()));
unsafe {
@ -144,8 +148,10 @@ pub extern "C" fn wgpu_compute_pass_insert_debug_marker(
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: ComputePassId, x: u32, y: u32, z: u32) {
let mut token = Token::root();
let (mut pass_guard, _) = HUB.compute_passes.write(&mut token);
unsafe {
HUB.compute_passes.write()[pass_id].raw.dispatch([x, y, z]);
pass_guard[pass_id].raw.dispatch([x, y, z]);
}
}
@ -154,9 +160,12 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass_id: ComputePassId,
pipeline_id: ComputePipelineId,
) {
let mut pass_guard = HUB.compute_passes.write();
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = HUB.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = HUB.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = HUB.compute_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let pipeline_guard = HUB.compute_pipelines.read();
let (pipeline_guard, _) = HUB.compute_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
unsafe {
@ -165,10 +174,7 @@ pub extern "C" fn wgpu_compute_pass_set_pipeline(
// Rebind resources
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());

View File

@ -18,7 +18,7 @@ use crate::{
RenderPassContext,
RenderPassKey,
},
hub::{Storage, HUB},
hub::{HUB, Storage, Token},
resource::TexturePlacement,
swap_chain::{SwapChainLink, SwapImageEpoch},
track::{Stitch, TrackerSet},
@ -163,8 +163,10 @@ pub struct CommandEncoderDescriptor {
pub extern "C" fn wgpu_command_encoder_finish(
command_encoder_id: CommandEncoderId,
) -> CommandBufferId {
let mut token = Token::root();
//TODO: actually close the last recorded command buffer
HUB.command_buffers.write()[command_encoder_id].is_recording = false; //TODO: check for the old value
let (mut comb_guard, _) = HUB.command_buffers.write(&mut token);
comb_guard[command_encoder_id].is_recording = false; //TODO: check for the old value
command_encoder_id
}
@ -172,12 +174,16 @@ pub fn command_encoder_begin_render_pass(
command_encoder_id: CommandEncoderId,
desc: &RenderPassDescriptor,
) -> RenderPass<Backend> {
let device_guard = HUB.devices.read();
let mut cmb_guard = HUB.command_buffers.write();
let mut token = Token::root();
let (adapter_guard, mut token) = HUB.adapters.read(&mut token);
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let device = &device_guard[cmb.device_id.value];
let texture_guard = HUB.textures.read();
let view_guard = HUB.texture_views.read();
let (_, mut token) = HUB.buffers.read(&mut token); //skip token
let (texture_guard, mut token) = HUB.textures.read(&mut token);
let (view_guard, _) = HUB.texture_views.read(&mut token);
let mut current_comb = device.com_allocator.extend(cmb);
unsafe {
@ -189,7 +195,7 @@ pub fn command_encoder_begin_render_pass(
let mut extent = None;
let mut barriers = Vec::new();
let limits = HUB.adapters.read()[device.adapter_id].physical_device.limits();
let limits = adapter_guard[device.adapter_id].physical_device.limits();
let samples_count_limit = limits.framebuffer_color_samples_count;
let color_attachments =
@ -566,13 +572,14 @@ pub extern "C" fn wgpu_command_encoder_begin_render_pass(
desc: &RenderPassDescriptor,
) -> RenderPassId {
let pass = command_encoder_begin_render_pass(command_encoder_id, desc);
HUB.render_passes.register_local(pass)
HUB.render_passes.register_local(pass, &mut Token::root())
}
pub fn command_encoder_begin_compute_pass(
command_encoder_id: CommandEncoderId,
) -> ComputePass<Backend> {
let mut cmb_guard = HUB.command_buffers.write();
let mut token = Token::root();
let (mut cmb_guard, _) = HUB.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_encoder_id];
let raw = cmb.raw.pop().unwrap();
@ -591,5 +598,5 @@ pub extern "C" fn wgpu_command_encoder_begin_compute_pass(
command_encoder_id: CommandEncoderId,
) -> ComputePassId {
let pass = command_encoder_begin_compute_pass(command_encoder_id);
HUB.compute_passes.register_local(pass)
HUB.compute_passes.register_local(pass, &mut Token::root())
}

View File

@ -2,7 +2,7 @@ use crate::{
command::bind::{Binder, LayoutChange},
conv,
device::{RenderPassContext, BIND_BUFFER_ALIGNMENT, MAX_VERTEX_BUFFERS},
hub::HUB,
hub::{Token, HUB},
pipeline::{IndexFormat, InputStepMode, PipelineFlags},
resource::BufferUsage,
track::{Stitch, TrackerSet},
@ -175,13 +175,16 @@ impl<B: hal::Backend> RenderPass<B> {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBufferId {
let mut cmb_guard = HUB.command_buffers.write();
let mut pass = HUB.render_passes.unregister(pass_id);
let mut token = Token::root();
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let (mut pass, mut token) = HUB.render_passes.unregister(pass_id, &mut token);
unsafe {
pass.raw.end_render_pass();
}
pass.trackers.optimize();
let cmb = &mut cmb_guard[pass.cmb_id.value];
let (buffer_guard, mut token) = HUB.buffers.read(&mut token);
let (texture_guard, _) = HUB.textures.read(&mut token);
match cmb.raw.last_mut() {
Some(ref mut last) => {
@ -190,8 +193,8 @@ pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBuf
&mut cmb.trackers,
&pass.trackers,
Stitch::Last,
&*HUB.buffers.read(),
&*HUB.textures.read(),
&*buffer_guard,
&*texture_guard,
);
unsafe { last.finish() };
}
@ -212,9 +215,12 @@ pub extern "C" fn wgpu_render_pass_set_bind_group(
offsets: *const BufferAddress,
offsets_length: usize,
) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = HUB.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = HUB.bind_groups.read(&mut token);
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let bind_group_guard = HUB.bind_groups.read();
let bind_group = pass
.trackers
@ -247,7 +253,6 @@ pub extern "C" fn wgpu_render_pass_set_bind_group(
pass.binder
.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let bind_groups =
iter::once(bind_group.raw.raw()).chain(follow_up.map(|bg_id| bind_group_guard[bg_id].raw.raw()));
unsafe {
@ -284,8 +289,9 @@ pub extern "C" fn wgpu_render_pass_set_index_buffer(
buffer_id: BufferId,
offset: BufferAddress,
) {
let mut pass_guard = HUB.render_passes.write();
let buffer_guard = HUB.buffers.read();
let mut token = Token::root();
let (mut pass_guard, mut token) = HUB.render_passes.write(&mut token);
let (buffer_guard, _) = HUB.buffers.read(&mut token);
let pass = &mut pass_guard[pass_id];
let buffer = pass
@ -316,8 +322,9 @@ pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
offsets: *const BufferAddress,
length: usize,
) {
let mut pass_guard = HUB.render_passes.write();
let buffer_guard = HUB.buffers.read();
let mut token = Token::root();
let (mut pass_guard, mut token) = HUB.render_passes.write(&mut token);
let (buffer_guard, _) = HUB.buffers.read(&mut token);
let buffers = unsafe { slice::from_raw_parts(buffers, length) };
let offsets = unsafe { slice::from_raw_parts(offsets, length) };
@ -353,7 +360,8 @@ pub extern "C" fn wgpu_render_pass_draw(
first_vertex: u32,
first_instance: u32,
) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
@ -377,7 +385,8 @@ pub extern "C" fn wgpu_render_pass_draw_indexed(
base_vertex: i32,
first_instance: u32,
) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.is_ready().unwrap();
@ -399,9 +408,12 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
pass_id: RenderPassId,
pipeline_id: RenderPipelineId,
) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = HUB.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = HUB.bind_groups.read(&mut token);
let (mut pass_guard, mut token) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
let pipeline_guard = HUB.render_pipelines.read();
let (pipeline_guard, mut token) = HUB.render_pipelines.read(&mut token);
let pipeline = &pipeline_guard[pipeline_id];
assert!(
@ -421,10 +433,7 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
// Rebind resource
if pass.binder.pipeline_layout_id != Some(pipeline.layout_id.clone()) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id];
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder
.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
@ -456,7 +465,7 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
pass.index_state.update_limit();
if let Some((buffer_id, ref range)) = pass.index_state.bound_buffer_view {
let buffer_guard = HUB.buffers.read();
let (buffer_guard, _) = HUB.buffers.read(&mut token);
let buffer = pass
.trackers
.buffers
@ -488,7 +497,8 @@ pub extern "C" fn wgpu_render_pass_set_pipeline(
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_blend_color(pass_id: RenderPassId, color: &Color) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.blend_color_status = OptionalState::Set;
@ -500,7 +510,8 @@ pub extern "C" fn wgpu_render_pass_set_blend_color(pass_id: RenderPassId, color:
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_stencil_reference(pass_id: RenderPassId, value: u32) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
pass.stencil_reference_status = OptionalState::Set;
@ -520,7 +531,8 @@ pub extern "C" fn wgpu_render_pass_set_viewport(
min_depth: f32,
max_depth: f32,
) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {
@ -550,7 +562,8 @@ pub extern "C" fn wgpu_render_pass_set_scissor_rect(
w: u32,
h: u32,
) {
let mut pass_guard = HUB.render_passes.write();
let mut token = Token::root();
let (mut pass_guard, _) = HUB.render_passes.write(&mut token);
let pass = &mut pass_guard[pass_id];
unsafe {

View File

@ -1,7 +1,7 @@
use crate::{
conv,
device::{all_buffer_stages, all_image_stages},
hub::HUB,
hub::{HUB, Token},
resource::TexturePlacement,
swap_chain::SwapChainLink,
BufferAddress,
@ -73,9 +73,10 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_buffer(
dst_offset: BufferAddress,
size: BufferAddress,
) {
let mut cmb_guard = HUB.command_buffers.write();
let mut token = Token::root();
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let (buffer_guard, _) = HUB.buffers.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
@ -125,10 +126,11 @@ pub extern "C" fn wgpu_command_buffer_copy_buffer_to_texture(
destination: &TextureCopyView,
copy_size: Extent3d,
) {
let mut cmb_guard = HUB.command_buffers.write();
let mut token = Token::root();
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let (buffer_guard, mut token) = HUB.buffers.read(&mut token);
let (texture_guard, _) = HUB.textures.read(&mut token);
let aspects = texture_guard[destination.texture].full_range.aspects;
let (src_buffer, src_pending) = cmb
@ -202,10 +204,11 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_buffer(
destination: &BufferCopyView,
copy_size: Extent3d,
) {
let mut cmb_guard = HUB.command_buffers.write();
let mut token = Token::root();
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_buffer_id];
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let (buffer_guard, mut token) = HUB.buffers.read(&mut token);
let (texture_guard, _) = HUB.textures.read(&mut token);
let aspects = texture_guard[source.texture].full_range.aspects;
let (src_texture, src_pending) = cmb.trackers.textures.use_replace(
@ -278,9 +281,11 @@ pub extern "C" fn wgpu_command_buffer_copy_texture_to_texture(
destination: &TextureCopyView,
copy_size: Extent3d,
) {
let mut cmb_guard = HUB.command_buffers.write();
let mut token = Token::root();
let (mut cmb_guard, mut token) = HUB.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[command_buffer_id];
let texture_guard = HUB.textures.read();
let (_, mut token) = HUB.buffers.read(&mut token); // skip token
let (texture_guard, _) = HUB.textures.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();

View File

@ -2,7 +2,7 @@ use crate::{
binding_model,
command,
conv,
hub::HUB,
hub::{HUB, Root, Token},
pipeline,
resource,
swap_chain,
@ -253,14 +253,27 @@ impl<B: hal::Backend> PendingResources<B> {
}
}
type DeviceToken<'a> = Token<'a, Device<back::Backend>>;
impl PendingResources<back::Backend> {
fn triage_referenced(&mut self, trackers: &mut TrackerSet) {
fn triage_referenced(&mut self, trackers: &mut TrackerSet, mut token: &mut DeviceToken) {
// Before destruction, a resource is expected to have the following strong refs:
// - in resource itself
// - in the device tracker
// - in this list
const MIN_REFS: usize = 4;
if self.referenced.iter().all(|r| r.1.load() >= MIN_REFS) {
return;
}
//TODO: lock less, if possible
let (mut bind_group_guard, mut token) = HUB.bind_groups.write(&mut token);
let (mut buffer_guard, mut token) = HUB.buffers.write(&mut token);
let (mut texture_guard, mut token) = HUB.textures.write(&mut token);
let (mut teview_view_guard, _) = HUB.texture_views.write(&mut token);
for i in (0 .. self.referenced.len()).rev() {
let num_refs = self.referenced[i].1.load();
// Before destruction, a resource is expected to have the following strong refs:
// 1. in resource itself
// 2. in the device tracker
// 3. in this list
if num_refs <= 3 {
let resource_id = self.referenced.swap_remove(i).0;
assert_eq!(
@ -270,16 +283,20 @@ impl PendingResources<back::Backend> {
);
let (life_guard, resource) = match resource_id {
ResourceId::Buffer(id) => {
if HUB.buffers.read()[id].pending_map_operation.is_some() {
if buffer_guard[id].pending_map_operation.is_some() {
continue
}
trackers.buffers.remove(id);
let buf = HUB.buffers.unregister(id);
let buf = buffer_guard.remove(id);
#[cfg(feature = "local")]
HUB.buffers.identity.lock().free(id);
(buf.life_guard, NativeResource::Buffer(buf.raw, buf.memory))
}
ResourceId::Texture(id) => {
trackers.textures.remove(id);
let tex = HUB.textures.unregister(id);
let tex = texture_guard.remove(id);
#[cfg(feature = "local")]
HUB.textures.identity.lock().free(id);
let memory = match tex.placement {
// swapchain-owned images don't need explicit destruction
resource::TexturePlacement::SwapChain(_) => continue,
@ -290,12 +307,16 @@ impl PendingResources<back::Backend> {
}
ResourceId::TextureView(id) => {
trackers.views.remove(id);
let view = HUB.texture_views.unregister(id);
let view = teview_view_guard.remove(id);
#[cfg(feature = "local")]
HUB.texture_views.identity.lock().free(id);
(view.life_guard, NativeResource::ImageView(view.raw))
}
ResourceId::BindGroup(id) => {
trackers.bind_groups.remove(id);
let bind_group = HUB.bind_groups.unregister(id);
let bind_group = bind_group_guard.remove(id);
#[cfg(feature = "local")]
HUB.bind_groups.identity.lock().free(id);
(bind_group.life_guard, NativeResource::DescriptorSet(bind_group.raw))
}
};
@ -309,8 +330,13 @@ impl PendingResources<back::Backend> {
}
}
}
}
let buffer_guard = HUB.buffers.read();
fn triage_mapped(&mut self, mut token: &mut DeviceToken) {
if self.mapped.is_empty() {
return;
}
let (buffer_guard, _) = HUB.buffers.read(&mut token);
for stored in self.mapped.drain(..) {
let resource_id = stored.value;
@ -338,8 +364,9 @@ impl PendingResources<back::Backend> {
FramebufferKey,
<back::Backend as hal::Backend>::Framebuffer,
>,
mut token: &mut DeviceToken,
) {
let texture_view_guard = HUB.texture_views.read();
let (texture_view_guard, _) = HUB.texture_views.read(&mut token);
let remove_list = framebuffers
.keys()
.filter_map(|key| {
@ -375,11 +402,15 @@ impl PendingResources<back::Backend> {
fn handle_mapping(
&mut self,
raw: &<back::Backend as hal::Backend>::Device,
mut token: &mut DeviceToken,
) -> Vec<BufferMapPendingCallback> {
if self.ready_to_map.is_empty() {
return Vec::new()
}
let (mut buffer_guard, _) = HUB.buffers.write(&mut token);
self.ready_to_map
.drain(..)
.map(|buffer_id| {
let mut buffer_guard = HUB.buffers.write();
let buffer = &mut buffer_guard[buffer_id];
let operation = buffer.pending_map_operation.take().unwrap();
let result = match operation {
@ -506,14 +537,17 @@ impl<B: hal::Backend> Device<B> {
}
impl Device<back::Backend> {
fn maintain(&self, force_wait: bool) -> Vec<BufferMapPendingCallback> {
fn maintain(
&self, force_wait: bool, token: &mut DeviceToken,
) -> Vec<BufferMapPendingCallback> {
let mut pending = self.pending.lock();
let mut trackers = self.trackers.lock();
pending.triage_referenced(&mut *trackers);
pending.triage_framebuffers(&mut *self.framebuffers.lock());
pending.triage_referenced(&mut *trackers, token);
pending.triage_mapped(token);
pending.triage_framebuffers(&mut *self.framebuffers.lock(), token);
let last_done = pending.cleanup(&self.raw, &self.mem_allocator, &self.desc_allocator, force_wait);
let callbacks = pending.handle_mapping(&self.raw);
let callbacks = pending.handle_mapping(&self.raw, token);
unsafe {
self.desc_allocator
@ -556,7 +590,7 @@ pub fn device_create_buffer(
device_id: DeviceId,
desc: &resource::BufferDescriptor,
) -> resource::Buffer<back::Backend> {
let device_guard = HUB.devices.read();
let (device_guard, _) = HUB.devices.read(&mut Token::root());
let device = &device_guard[device_id];
let (usage, _memory_properties) = conv::map_buffer_usage(desc.usage);
@ -613,8 +647,10 @@ pub fn device_track_buffer(
buffer_id: BufferId,
ref_count: RefCount,
flags: resource::BufferUsage,
token: &mut Token<Root>,
) {
let ok = HUB.devices.read()[device_id]
let (device_guard, _) = HUB.devices.read(token);
let ok = device_guard[device_id]
.trackers
.lock()
.buffers
@ -630,8 +666,9 @@ pub extern "C" fn wgpu_device_create_buffer(
) -> BufferId {
let buffer = device_create_buffer(device_id, desc);
let ref_count = buffer.life_guard.ref_count.clone();
let id = HUB.buffers.register_local(buffer);
device_track_buffer(device_id, id, ref_count, resource::BufferUsage::empty());
let mut token = Token::root();
let id = HUB.buffers.register_local(buffer, &mut token);
device_track_buffer(device_id, id, ref_count, resource::BufferUsage::empty(), &mut token);
id
}
@ -645,9 +682,10 @@ pub extern "C" fn wgpu_device_create_buffer_mapped(
let mut desc = desc.clone();
desc.usage |= resource::BufferUsage::MAP_WRITE;
let mut buffer = device_create_buffer(device_id, &desc);
let mut token = Token::root();
{
let device_guard = HUB.devices.read();
let (device_guard, _) = HUB.devices.read(&mut token);
let device = &device_guard[device_id];
match map_buffer(
@ -669,15 +707,16 @@ pub extern "C" fn wgpu_device_create_buffer_mapped(
}
let ref_count = buffer.life_guard.ref_count.clone();
let id = HUB.buffers.register_local(buffer);
device_track_buffer(device_id, id, ref_count, resource::BufferUsage::MAP_WRITE);
let id = HUB.buffers.register_local(buffer, &mut token);
device_track_buffer(device_id, id, ref_count, resource::BufferUsage::MAP_WRITE, &mut token);
id
}
#[no_mangle]
pub extern "C" fn wgpu_buffer_destroy(buffer_id: BufferId) {
let device_guard = HUB.devices.read();
let buffer_guard = HUB.buffers.read();
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (buffer_guard, _) = HUB.buffers.read(&mut token);
let buffer = &buffer_guard[buffer_id];
device_guard[buffer.device_id.value].pending.lock().destroy(
ResourceId::Buffer(buffer_id),
@ -693,7 +732,8 @@ pub fn device_create_texture(
let format = conv::map_texture_format(desc.format);
let aspects = format.surface_desc().aspects;
let usage = conv::map_texture_usage(desc.usage, aspects);
let device_guard = HUB.devices.read();
let (device_guard, _) = HUB.devices.read(&mut Token::root());
let device = &device_guard[device_id];
assert!((desc.mip_level_count as usize) < MAX_MIP_LEVELS);
@ -751,8 +791,10 @@ pub fn device_track_texture(
texture_id: TextureId,
ref_count: RefCount,
full_range: hal::image::SubresourceRange,
token: &mut Token<Root>,
) {
let ok = HUB.devices.read()[device_id]
let (device_guard, _) = HUB.devices.read(token);
let ok = device_guard[device_id]
.trackers
.lock()
.textures
@ -774,8 +816,9 @@ pub extern "C" fn wgpu_device_create_texture(
let texture = device_create_texture(device_id, desc);
let ref_count = texture.life_guard.ref_count.clone();
let range = texture.full_range.clone();
let id = HUB.textures.register_local(texture);
device_track_texture(device_id, id, ref_count, range);
let mut token = Token::root();
let id = HUB.textures.register_local(texture, &mut token);
device_track_texture(device_id, id, ref_count, range, &mut token);
id
}
@ -784,9 +827,10 @@ pub fn texture_create_view(
format: resource::TextureFormat,
view_kind: hal::image::ViewKind,
range: hal::image::SubresourceRange,
token: &mut Token<Root>,
) -> resource::TextureView<back::Backend> {
let device_guard = HUB.devices.read();
let texture_guard = HUB.textures.read();
let (device_guard, mut token) = HUB.devices.read(token);
let (texture_guard, _) = HUB.textures.read(&mut token);
let texture = &texture_guard[texture_id];
let raw = unsafe {
@ -817,9 +861,17 @@ pub fn texture_create_view(
}
}
pub fn device_track_view(texture_id: TextureId, view_id: TextureViewId, ref_count: RefCount) {
let device_id = HUB.textures.read()[texture_id].device_id.value;
let ok = HUB.devices.read()[device_id]
pub fn device_track_view(
texture_id: TextureId,
view_id: TextureViewId,
ref_count: RefCount,
token: &mut Token<Root>,
) {
let (device_guard, mut token) = HUB.devices.read(token);
let (texture_guard, _) = HUB.textures.read(&mut token);
let device_id = texture_guard[texture_id].device_id.value;
let ok = device_guard[device_id]
.trackers
.lock()
.views
@ -833,6 +885,7 @@ pub extern "C" fn wgpu_texture_create_view(
texture_id: TextureId,
desc: &resource::TextureViewDescriptor,
) -> TextureViewId {
let mut token = Token::root();
let view = texture_create_view(
texture_id,
desc.format,
@ -843,18 +896,20 @@ pub extern "C" fn wgpu_texture_create_view(
layers: desc.base_array_layer as u16
.. (desc.base_array_layer + desc.array_count) as u16,
},
&mut token,
);
let ref_count = view.life_guard.ref_count.clone();
let id = HUB.texture_views.register_local(view);
device_track_view(texture_id, id, ref_count);
let id = HUB.texture_views.register_local(view, &mut token);
device_track_view(texture_id, id, ref_count, &mut token);
id
}
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_texture_create_default_view(texture_id: TextureId) -> TextureViewId {
let mut token = Token::root();
let (format, view_kind, range) = {
let texture_guard = HUB.textures.read();
let (texture_guard, _) = HUB.textures.read(&mut token);
let texture = &texture_guard[texture_id];
let view_kind = match texture.kind {
hal::image::Kind::D1(_, 1) => hal::image::ViewKind::D1,
@ -865,17 +920,18 @@ pub extern "C" fn wgpu_texture_create_default_view(texture_id: TextureId) -> Tex
};
(texture.format, view_kind, texture.full_range.clone())
};
let view = texture_create_view(texture_id, format, view_kind, range);
let view = texture_create_view(texture_id, format, view_kind, range, &mut token);
let ref_count = view.life_guard.ref_count.clone();
let id = HUB.texture_views.register_local(view);
device_track_view(texture_id, id, ref_count);
let id = HUB.texture_views.register_local(view, &mut token);
device_track_view(texture_id, id, ref_count, &mut token);
id
}
#[no_mangle]
pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
let device_guard = HUB.devices.read();
let texture_guard = HUB.textures.read();
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (texture_guard, _) = HUB.textures.read(&mut token);
let texture = &texture_guard[texture_id];
device_guard[texture.device_id.value]
.pending
@ -888,9 +944,10 @@ pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
#[no_mangle]
pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: TextureViewId) {
let device_guard = HUB.devices.read();
let texture_guard = HUB.textures.read();
let texture_view_guard = HUB.texture_views.read();
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (texture_guard, mut token) = HUB.textures.read(&mut token);
let (texture_view_guard, _) = HUB.texture_views.read(&mut token);
let view = &texture_view_guard[texture_view_id];
let device_id = texture_guard[view.texture_id.value].device_id.value;
device_guard[device_id].pending.lock().destroy(
@ -902,8 +959,9 @@ pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: TextureViewId) {
pub fn device_create_sampler(
device_id: DeviceId,
desc: &resource::SamplerDescriptor,
token: &mut Token<Root>,
) -> resource::Sampler<back::Backend> {
let device_guard = HUB.devices.read();
let (device_guard, _) = HUB.devices.read(token);
let device = &device_guard[device_id];
let info = hal::image::SamplerInfo {
@ -936,13 +994,15 @@ pub extern "C" fn wgpu_device_create_sampler(
device_id: DeviceId,
desc: &resource::SamplerDescriptor,
) -> SamplerId {
let sampler = device_create_sampler(device_id, desc);
HUB.samplers.register_local(sampler)
let mut token = Token::root();
let sampler = device_create_sampler(device_id, desc, &mut token);
HUB.samplers.register_local(sampler, &mut token)
}
pub fn device_create_bind_group_layout(
device_id: DeviceId,
desc: &binding_model::BindGroupLayoutDescriptor,
token: &mut Token<Root>,
) -> binding_model::BindGroupLayout<back::Backend> {
let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length) };
@ -958,7 +1018,8 @@ pub fn device_create_bind_group_layout(
.collect::<Vec<_>>(); //TODO: avoid heap allocation
let raw = unsafe {
HUB.devices.read()[device_id]
let (device_guard, _) = HUB.devices.read(token);
device_guard[device_id]
.raw
.create_descriptor_set_layout(&raw_bindings, &[])
.unwrap()
@ -985,18 +1046,20 @@ pub extern "C" fn wgpu_device_create_bind_group_layout(
device_id: DeviceId,
desc: &binding_model::BindGroupLayoutDescriptor,
) -> BindGroupLayoutId {
let layout = device_create_bind_group_layout(device_id, desc);
HUB.bind_group_layouts.register_local(layout)
let mut token = Token::root();
let layout = device_create_bind_group_layout(device_id, desc, &mut token);
HUB.bind_group_layouts.register_local(layout, &mut token)
}
pub fn device_create_pipeline_layout(
device_id: DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
token: &mut Token<Root>,
) -> binding_model::PipelineLayout<back::Backend> {
let device_guard = HUB.devices.read();
let (device_guard, mut token) = HUB.devices.read(token);
let bind_group_layout_ids =
unsafe { slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length) };
let bind_group_layout_guard = HUB.bind_group_layouts.read();
let (bind_group_layout_guard, _) = HUB.bind_group_layouts.read(&mut token);
let descriptor_set_layouts = bind_group_layout_ids
.iter()
.map(|&id| &bind_group_layout_guard[id].raw);
@ -1021,17 +1084,19 @@ pub extern "C" fn wgpu_device_create_pipeline_layout(
device_id: DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
) -> PipelineLayoutId {
let layout = device_create_pipeline_layout(device_id, desc);
HUB.pipeline_layouts.register_local(layout)
let mut token = Token::root();
let layout = device_create_pipeline_layout(device_id, desc, &mut token);
HUB.pipeline_layouts.register_local(layout, &mut token)
}
pub fn device_create_bind_group(
device_id: DeviceId,
desc: &binding_model::BindGroupDescriptor,
token: &mut Token<Root>,
) -> binding_model::BindGroup<back::Backend> {
let device_guard = HUB.devices.read();
let (device_guard, mut token) = HUB.devices.read(token);
let device = &device_guard[device_id];
let bind_group_layout_guard = HUB.bind_group_layouts.read();
let (bind_group_layout_guard, _) = HUB.bind_group_layouts.read(&mut token);
let bind_group_layout = &bind_group_layout_guard[desc.layout];
let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length as usize) };
assert_eq!(bindings.len(), bind_group_layout.bindings.len());
@ -1051,9 +1116,10 @@ pub fn device_create_bind_group(
desc_sets.pop().unwrap()
};
let buffer_guard = HUB.buffers.read();
let sampler_guard = HUB.samplers.read();
let texture_view_guard = HUB.texture_views.read();
let (buffer_guard, mut token) = HUB.buffers.read(&mut token);
let (_, mut token) = HUB.textures.read(&mut token); //skip token
let (texture_view_guard, mut token) = HUB.texture_views.read(&mut token);
let (sampler_guard, _) = HUB.samplers.read(&mut token);
//TODO: group writes into contiguous sections
let mut writes = Vec::new();
@ -1149,8 +1215,10 @@ pub fn device_track_bind_group(
device_id: DeviceId,
bind_group_id: BindGroupId,
ref_count: RefCount,
token: &mut Token<Root>,
) {
let ok = HUB.devices.read()[device_id]
let (device_guard, _) = HUB.devices.read(token);
let ok = device_guard[device_id]
.trackers
.lock()
.bind_groups
@ -1164,18 +1232,19 @@ pub extern "C" fn wgpu_device_create_bind_group(
device_id: DeviceId,
desc: &binding_model::BindGroupDescriptor,
) -> BindGroupId {
let bind_group = device_create_bind_group(device_id, desc);
let mut token = Token::root();
let bind_group = device_create_bind_group(device_id, desc, &mut token);
let ref_count = bind_group.life_guard.ref_count.clone();
let id = HUB.bind_groups.register_local(bind_group);
device_track_bind_group(device_id, id, ref_count);
let id = HUB.bind_groups.register_local(bind_group, &mut token);
device_track_bind_group(device_id, id, ref_count, &mut token);
id
}
#[no_mangle]
pub extern "C" fn wgpu_bind_group_destroy(bind_group_id: BindGroupId) {
let device_guard = HUB.devices.read();
let bind_group_guard = HUB.bind_groups.read();
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (bind_group_guard, _) = HUB.bind_groups.read(&mut token);
let bind_group = &bind_group_guard[bind_group_id];
device_guard[bind_group.device_id.value].pending.lock().destroy(
ResourceId::BindGroup(bind_group_id),
@ -1186,10 +1255,12 @@ pub extern "C" fn wgpu_bind_group_destroy(bind_group_id: BindGroupId) {
pub fn device_create_shader_module(
device_id: DeviceId,
desc: &pipeline::ShaderModuleDescriptor,
token: &mut Token<Root>,
) -> ShaderModule<back::Backend> {
let spv = unsafe { slice::from_raw_parts(desc.code.bytes, desc.code.length) };
let (device_guard, _) = HUB.devices.read(token);
let shader = unsafe {
HUB.devices.read()[device_id]
device_guard[device_id]
.raw
.create_shader_module(spv)
.unwrap()
@ -1204,15 +1275,17 @@ pub extern "C" fn wgpu_device_create_shader_module(
device_id: DeviceId,
desc: &pipeline::ShaderModuleDescriptor,
) -> ShaderModuleId {
let module = device_create_shader_module(device_id, desc);
HUB.shader_modules.register_local(module)
let mut token = Token::root();
let module = device_create_shader_module(device_id, desc, &mut token);
HUB.shader_modules.register_local(module, &mut token)
}
pub fn device_create_command_encoder(
device_id: DeviceId,
_desc: &command::CommandEncoderDescriptor,
token: &mut Token<Root>,
) -> command::CommandBuffer<back::Backend> {
let device_guard = HUB.devices.read();
let (device_guard, _) = HUB.devices.read(token);
let device = &device_guard[device_id];
let dev_stored = Stored {
@ -1235,8 +1308,9 @@ pub extern "C" fn wgpu_device_create_command_encoder(
device_id: DeviceId,
desc: &command::CommandEncoderDescriptor,
) -> CommandEncoderId {
let cmb = device_create_command_encoder(device_id, desc);
HUB.command_buffers.register_local(cmb)
let mut token = Token::root();
let cmb = device_create_command_encoder(device_id, desc, &mut token);
HUB.command_buffers.register_local(cmb, &mut token)
}
#[no_mangle]
@ -1254,8 +1328,9 @@ pub extern "C" fn wgpu_queue_submit(
unsafe { slice::from_raw_parts(command_buffers, command_buffers_length) };
let (submit_index, fence) = {
let surface_guard = HUB.surfaces.read();
let mut device_guard = HUB.devices.write();
let mut token = Token::root();
let (surface_guard, mut token) = HUB.surfaces.read(&mut token);
let (mut device_guard, mut token) = HUB.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let mut trackers = device.trackers.lock();
let mut wait_semaphores = Vec::new();
@ -1269,11 +1344,11 @@ pub extern "C" fn wgpu_queue_submit(
// native command buffer of the previous chain instead of always creating
// a temporary one, since the chains are not finished.
{
let mut command_buffer_guard = HUB.command_buffers.write();
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let texture_view_guard = HUB.texture_views.read();
let bind_group_guard = HUB.bind_groups.read();
let (mut command_buffer_guard, mut token) = HUB.command_buffers.write(&mut token);
let (bind_group_guard, mut token) = HUB.bind_groups.read(&mut token);
let (buffer_guard, mut token) = HUB.buffers.read(&mut token);
let (texture_guard, mut token) = HUB.textures.read(&mut token);
let (texture_view_guard, _) = HUB.texture_views.read(&mut token);
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
@ -1353,7 +1428,7 @@ pub extern "C" fn wgpu_queue_submit(
// now prepare the GPU submission
let fence = device.raw.create_fence(false).unwrap();
{
let command_buffer_guard = HUB.command_buffers.read();
let (command_buffer_guard, _) = HUB.command_buffers.read(&mut token);
let submission =
hal::queue::Submission::<_, _, &[<back::Backend as hal::Backend>::Semaphore]> {
//TODO: may `OneShot` be enough?
@ -1376,10 +1451,11 @@ pub extern "C" fn wgpu_queue_submit(
// No need for write access to the device from here on out
let callbacks = {
let device_guard = HUB.devices.read();
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let device = &device_guard[queue_id];
let callbacks = device.maintain(false);
let callbacks = device.maintain(false, &mut token);
device.pending.lock().active.alloc().init(ActiveSubmission {
index: submit_index,
fence,
@ -1389,7 +1465,7 @@ pub extern "C" fn wgpu_queue_submit(
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
let cmd_buf = HUB.command_buffers.unregister(cmb_id);
let (cmd_buf, _) = HUB.command_buffers.unregister(cmb_id, &mut token);
device.com_allocator.after_submit(cmd_buf, submit_index);
}
@ -1402,17 +1478,18 @@ pub extern "C" fn wgpu_queue_submit(
pub fn device_create_render_pipeline(
device_id: DeviceId,
desc: &pipeline::RenderPipelineDescriptor,
token: &mut Token<Root>,
) -> pipeline::RenderPipeline<back::Backend> {
let sc = desc.sample_count;
assert!(sc == 1 || sc == 2 || sc == 4 || sc == 8 || sc == 16 || sc == 32,
"Invalid sample_count of {}", sc);
let sc = sc as u8;
let device_guard = HUB.devices.read();
let (device_guard, mut token) = HUB.devices.read(token);
let device = &device_guard[device_id];
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let (pipeline_layout_guard, mut token) = HUB.pipeline_layouts.read(&mut token);
let layout = &pipeline_layout_guard[desc.layout].raw;
let shader_module_guard = HUB.shader_modules.read();
let (shader_module_guard, _) = HUB.shader_modules.read(&mut token);
let color_states =
unsafe { slice::from_raw_parts(desc.color_states, desc.color_states_length) };
@ -1647,20 +1724,22 @@ pub extern "C" fn wgpu_device_create_render_pipeline(
device_id: DeviceId,
desc: &pipeline::RenderPipelineDescriptor,
) -> RenderPipelineId {
let pipeline = device_create_render_pipeline(device_id, desc);
HUB.render_pipelines.register_local(pipeline)
let mut token = Token::root();
let pipeline = device_create_render_pipeline(device_id, desc, &mut token);
HUB.render_pipelines.register_local(pipeline, &mut token)
}
pub fn device_create_compute_pipeline(
device_id: DeviceId,
desc: &pipeline::ComputePipelineDescriptor,
token: &mut Token<Root>,
) -> pipeline::ComputePipeline<back::Backend> {
let device_guard = HUB.devices.read();
let (device_guard, mut token) = HUB.devices.read(token);
let device = &device_guard[device_id].raw;
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let (pipeline_layout_guard, mut token) = HUB.pipeline_layouts.read(&mut token);
let layout = &pipeline_layout_guard[desc.layout].raw;
let pipeline_stage = &desc.compute_stage;
let shader_module_guard = HUB.shader_modules.read();
let (shader_module_guard, _) = HUB.shader_modules.read(&mut token);
let shader = hal::pso::EntryPoint::<back::Backend> {
entry: unsafe { ffi::CStr::from_ptr(pipeline_stage.entry_point) }
@ -1701,24 +1780,26 @@ pub extern "C" fn wgpu_device_create_compute_pipeline(
device_id: DeviceId,
desc: &pipeline::ComputePipelineDescriptor,
) -> ComputePipelineId {
let pipeline = device_create_compute_pipeline(device_id, desc);
HUB.compute_pipelines.register_local(pipeline)
let mut token = Token::root();
let pipeline = device_create_compute_pipeline(device_id, desc, &mut token);
HUB.compute_pipelines.register_local(pipeline, &mut token)
}
pub fn device_create_swap_chain(
device_id: DeviceId,
surface_id: SurfaceId,
desc: &swap_chain::SwapChainDescriptor,
token: &mut Token<Root>,
) -> Vec<resource::Texture<back::Backend>> {
info!("creating swap chain {:?}", desc);
let mut surface_guard = HUB.surfaces.write();
let device_guard = HUB.devices.read();
let (mut surface_guard, mut token) = HUB.surfaces.write(token);
let (adapter_guard, mut token) = HUB.adapters.read(&mut token);
let (device_guard, _) = HUB.devices.read(&mut token);
let device = &device_guard[device_id];
let surface = &mut surface_guard[surface_id];
let (caps, formats, _present_modes) = {
let adapter_guard = HUB.adapters.read();
let adapter = &adapter_guard[device.adapter_id];
assert!(surface
.raw
@ -1828,10 +1909,11 @@ pub fn device_create_swap_chain(
pub fn swap_chain_populate_textures(
swap_chain_id: SwapChainId,
textures: Vec<resource::Texture<back::Backend>>,
token: &mut Token<Root>,
) {
let mut surface_guard = HUB.surfaces.write();
let (mut surface_guard, mut token) = HUB.surfaces.write(token);
let swap_chain = surface_guard[swap_chain_id].swap_chain.as_mut().unwrap();
let device_guard = HUB.devices.read();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let device = &device_guard[swap_chain.device_id.value];
let mut trackers = device.trackers.lock();
@ -1859,7 +1941,7 @@ pub fn swap_chain_populate_textures(
});
let texture_id = Stored {
ref_count: texture.life_guard.ref_count.clone(),
value: HUB.textures.register_local(texture),
value: HUB.textures.register_local(texture, &mut token),
};
trackers.textures.init(
texture_id.value,
@ -1880,7 +1962,7 @@ pub fn swap_chain_populate_textures(
};
let view_id = Stored {
ref_count: view.life_guard.ref_count.clone(),
value: HUB.texture_views.register_local(view),
value: HUB.texture_views.register_local(view, &mut token),
};
trackers
.views
@ -1906,21 +1988,23 @@ pub extern "C" fn wgpu_device_create_swap_chain(
surface_id: SurfaceId,
desc: &swap_chain::SwapChainDescriptor,
) -> SwapChainId {
let textures = device_create_swap_chain(device_id, surface_id, desc);
swap_chain_populate_textures(surface_id, textures);
let mut token = Token::root();
let textures = device_create_swap_chain(device_id, surface_id, desc, &mut token);
swap_chain_populate_textures(surface_id, textures, &mut token);
surface_id
}
#[no_mangle]
pub extern "C" fn wgpu_device_poll(device_id: DeviceId, force_wait: bool) {
let callbacks = HUB.devices.read()[device_id].maintain(force_wait);
let (device_guard, mut token) = HUB.devices.read(&mut Token::root());
let callbacks = device_guard[device_id].maintain(force_wait, &mut token);
Device::fire_map_callbacks(callbacks);
}
#[no_mangle]
pub extern "C" fn wgpu_device_destroy(device_id: DeviceId) {
let device = HUB.devices.unregister(device_id);
device.maintain(true);
let (device, mut token) = HUB.devices.unregister(device_id, &mut Token::root());
device.maintain(true, &mut token);
device.com_allocator.destroy(&device.raw);
}
@ -1937,8 +2021,11 @@ pub extern "C" fn wgpu_buffer_map_read_async(
callback: BufferMapReadCallback,
userdata: *mut u8,
) {
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (device_id, ref_count) = {
let mut buffer_guard = HUB.buffers.write();
let (mut buffer_guard, _) = HUB.buffers.write(&mut token);
let buffer = &mut buffer_guard[buffer_id];
if buffer.pending_map_operation.is_some() {
@ -1952,7 +2039,6 @@ pub extern "C" fn wgpu_buffer_map_read_async(
(buffer.device_id.value, buffer.life_guard.ref_count.clone())
};
let device_guard = HUB.devices.read();
let device = &device_guard[device_id];
device
@ -1972,8 +2058,11 @@ pub extern "C" fn wgpu_buffer_map_write_async(
callback: BufferMapWriteCallback,
userdata: *mut u8,
) {
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (device_id, ref_count) = {
let mut buffer_guard = HUB.buffers.write();
let (mut buffer_guard, _) = HUB.buffers.write(&mut token);
let buffer = &mut buffer_guard[buffer_id];
if buffer.pending_map_operation.is_some() {
@ -1987,8 +2076,7 @@ pub extern "C" fn wgpu_buffer_map_write_async(
(buffer.device_id.value, buffer.life_guard.ref_count.clone())
};
let device_guard = HUB.devices.read();
let device = &device_guard[device_id];
let device = &device_guard[device_id];
device
.trackers
@ -2001,12 +2089,12 @@ pub extern "C" fn wgpu_buffer_map_write_async(
#[no_mangle]
pub extern "C" fn wgpu_buffer_unmap(buffer_id: BufferId) {
// get the device ID first in order to have a clean locking order
let device_id = HUB.buffers.read()[buffer_id].device_id.value;
let device_guard = HUB.devices.read();
let device_raw = &device_guard[device_id].raw;
let mut buffer_guard = HUB.buffers.write();
let mut token = Token::root();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let (mut buffer_guard, _) = HUB.buffers.write(&mut token);
let buffer = &mut buffer_guard[buffer_id];
let device_raw = &device_guard[buffer.device_id.value].raw;
if !buffer.mapped_write_ranges.is_empty() {
unsafe {

View File

@ -37,20 +37,27 @@ use crate::{
};
#[cfg(not(feature = "gfx-backend-gl"))]
use crate::{InstanceHandle, InstanceId};
use lazy_static::lazy_static;
#[cfg(feature = "local")]
use parking_lot::Mutex;
use parking_lot::RwLock;
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use vec_map::VecMap;
use std::{ops, sync::Arc};
use std::{
cell::Cell,
marker::PhantomData,
ops,
sync::Arc,
};
/// A simple structure to manage identities of objects.
#[derive(Debug)]
pub struct IdentityManager<I: TypedId> {
free: Vec<Index>,
epochs: Vec<Epoch>,
phantom: std::marker::PhantomData<I>,
phantom: PhantomData<I>,
}
impl<I: TypedId> Default for IdentityManager<I> {
@ -58,7 +65,7 @@ impl<I: TypedId> Default for IdentityManager<I> {
IdentityManager {
free: Default::default(),
epochs: Default::default(),
phantom: std::marker::PhantomData,
phantom: PhantomData,
}
}
}
@ -92,7 +99,7 @@ impl<I: TypedId> IdentityManager<I> {
pub struct Storage<T, I: TypedId> {
//TODO: consider concurrent hashmap?
map: VecMap<(T, Epoch)>,
_phantom: std::marker::PhantomData<I>,
_phantom: PhantomData<I>,
}
impl<T, I: TypedId> ops::Index<I> for Storage<T, I> {
@ -119,12 +126,129 @@ impl<T, I: TypedId> Storage<T, I> {
_ => false,
}
}
/// Removes and returns the value stored under `id`.
///
/// Panics if nothing is stored at the id's index, or if the stored
/// epoch does not match the id's epoch (i.e. the id is stale).
pub fn remove(&mut self, id: I) -> T {
    let (value, stored_epoch) = self
        .map
        .remove(id.index() as usize)
        .unwrap();
    assert_eq!(stored_epoch, id.epoch());
    value
}
}
/// Type system for enforcing the lock order on shared HUB structures.
/// If type A implements `Access<B>`, that means we are allowed to proceed
/// with locking resource `B` after we lock `A`.
///
/// The implementations basically describe the edges in a directed graph
/// of lock transitions. As long as it doesn't have loops, we can have
/// multiple concurrent paths on this graph (from multiple threads) without
/// deadlocks, i.e. there is always a path whose next resource is not locked
/// by some other path, at any time.
pub trait Access<B> {}
/// The starting point of the lock-order graph: every chain of locks
/// begins with a root `Token` (see `Token::root`). Uninhabited, since
/// it only exists at the type level.
pub enum Root {}
//TODO: establish an order instead of declaring all the pairs.
// Each `impl Access<B> for A` below is one edge A -> B of the allowed
// lock-transition graph described on the `Access` trait.
#[cfg(not(feature = "gfx-backend-gl"))]
impl Access<InstanceHandle> for Root {}
impl Access<SurfaceHandle> for Root {}
#[cfg(not(feature = "gfx-backend-gl"))]
impl Access<SurfaceHandle> for InstanceHandle {}
impl Access<AdapterHandle> for Root {}
impl Access<AdapterHandle> for SurfaceHandle {}
impl Access<DeviceHandle> for Root {}
impl Access<DeviceHandle> for SurfaceHandle {}
impl Access<DeviceHandle> for AdapterHandle {}
impl Access<PipelineLayoutHandle> for Root {}
impl Access<PipelineLayoutHandle> for DeviceHandle {}
impl Access<BindGroupLayoutHandle> for Root {}
impl Access<BindGroupLayoutHandle> for DeviceHandle {}
impl Access<BindGroupHandle> for Root {}
impl Access<BindGroupHandle> for DeviceHandle {}
impl Access<BindGroupHandle> for PipelineLayoutHandle {}
impl Access<BindGroupHandle> for CommandBufferHandle {}
impl Access<CommandBufferHandle> for Root {}
impl Access<CommandBufferHandle> for DeviceHandle {}
impl Access<ComputePassHandle> for Root {}
impl Access<ComputePassHandle> for BindGroupHandle {}
impl Access<ComputePassHandle> for CommandBufferHandle {}
impl Access<RenderPassHandle> for Root {}
impl Access<RenderPassHandle> for BindGroupHandle {}
impl Access<RenderPassHandle> for CommandBufferHandle {}
impl Access<ComputePipelineHandle> for Root {}
impl Access<ComputePipelineHandle> for ComputePassHandle {}
impl Access<RenderPipelineHandle> for Root {}
impl Access<RenderPipelineHandle> for RenderPassHandle {}
impl Access<ShaderModuleHandle> for Root {}
impl Access<ShaderModuleHandle> for PipelineLayoutHandle {}
impl Access<BufferHandle> for Root {}
impl Access<BufferHandle> for DeviceHandle {}
impl Access<BufferHandle> for BindGroupLayoutHandle {}
impl Access<BufferHandle> for BindGroupHandle {}
impl Access<BufferHandle> for CommandBufferHandle {}
impl Access<BufferHandle> for ComputePassHandle {}
impl Access<BufferHandle> for ComputePipelineHandle {}
impl Access<BufferHandle> for RenderPassHandle {}
impl Access<BufferHandle> for RenderPipelineHandle {}
impl Access<TextureHandle> for Root {}
impl Access<TextureHandle> for DeviceHandle {}
impl Access<TextureHandle> for BufferHandle {}
impl Access<TextureViewHandle> for Root {}
impl Access<TextureViewHandle> for DeviceHandle {}
impl Access<TextureViewHandle> for TextureHandle {}
impl Access<SamplerHandle> for Root {}
impl Access<SamplerHandle> for TextureViewHandle {}
// Per-thread flag recording whether a root `Token` is currently alive.
// `Token::root` sets it (panicking if already set) and the root token's
// `Drop` clears it, enforcing at most one root token per thread.
thread_local! {
    static ACTIVE_TOKEN: Cell<bool> = Cell::new(false);
}
/// A permission token to lock resource `T` or anything after it,
/// as defined by the `Access` implementations.
///
/// Note: there can only be one non-borrowed `Token` alive on a thread
/// at a time, which is enforced by `ACTIVE_TOKEN`.
pub struct Token<'a, T: 'a> {
    // Zero-sized marker tying the token to lock level `T` and to the
    // borrow of the token it was derived from.
    level: PhantomData<&'a T>,
    // True only for the token created by `Token::root`; governs whether
    // `Drop` clears the per-thread `ACTIVE_TOKEN` flag.
    is_root: bool,
}
impl<'a, T> Token<'a, T> {
fn new() -> Self {
Token {
level: PhantomData,
is_root: false,
}
}
}
impl Token<'static, Root> {
    /// Creates the root token for the current thread.
    ///
    /// Panics if a root token is already alive on this thread, since
    /// only one non-borrowed `Token` may exist at a time.
    pub fn root() -> Self {
        ACTIVE_TOKEN.with(|active| assert!(!active.replace(true)));
        Token {
            level: PhantomData,
            is_root: true,
        }
    }
}
impl<'a, T> Drop for Token<'a, T> {
    /// Clears the per-thread `ACTIVE_TOKEN` flag when the root token
    /// dies, allowing a new root token to be created. Non-root tokens
    /// need no cleanup.
    fn drop(&mut self) {
        if !self.is_root {
            return;
        }
        ACTIVE_TOKEN.with(|active| assert!(active.replace(false)));
    }
}
#[derive(Debug)]
pub struct Registry<T, I: TypedId> {
#[cfg(feature = "local")]
identity: Mutex<IdentityManager<I>>,
pub identity: Mutex<IdentityManager<I>>,
data: RwLock<Storage<T, I>>,
}
@ -135,27 +259,16 @@ impl<T, I: TypedId> Default for Registry<T, I> {
identity: Mutex::new(IdentityManager::default()),
data: RwLock::new(Storage {
map: VecMap::new(),
_phantom: std::marker::PhantomData,
_phantom: PhantomData,
}),
}
}
}
impl<T, I: TypedId> ops::Deref for Registry<T, I> {
type Target = RwLock<Storage<T, I>>;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T, I: TypedId> ops::DerefMut for Registry<T, I> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
impl<T, I: TypedId + Copy> Registry<T, I> {
pub fn register(&self, id: I, value: T) {
pub fn register<A: Access<T>>(
&self, id: I, value: T, _token: &mut Token<A>
) {
let old = self
.data
.write()
@ -165,19 +278,34 @@ impl<T, I: TypedId + Copy> Registry<T, I> {
}
#[cfg(feature = "local")]
pub fn register_local(&self, value: T) -> I {
pub fn register_local<A: Access<T>>(
&self, value: T, token: &mut Token<A>
) -> I {
let id = self.identity.lock().alloc();
self.register(id, value);
self.register(id, value, token);
id
}
pub fn unregister(&self, id: I) -> T {
let (value, epoch) = self.data.write().map.remove(id.index() as usize).unwrap();
assert_eq!(epoch, id.epoch());
pub fn unregister<A: Access<T>>(
&self, id: I, _token: &mut Token<A>
) -> (T, Token<T>) {
let value = self.data.write().remove(id);
//Note: careful about the order here!
#[cfg(feature = "local")]
self.identity.lock().free(id);
value
(value, Token::new())
}
/// Takes the read lock on this registry's storage.
///
/// The caller must hold a token at a level `A` that is allowed to
/// precede `T` in the lock order (`A: Access<T>`). Returns the read
/// guard together with a fresh `Token<T>` that permits locking
/// resources ordered after `T`.
pub fn read<A: Access<T>>(
    &self, _token: &mut Token<A>
) -> (RwLockReadGuard<Storage<T, I>>, Token<T>) {
    (self.data.read(), Token::new())
}
/// Takes the write lock on this registry's storage.
///
/// Same lock-order contract as `read`: requires a token at a level
/// `A: Access<T>` and hands back a `Token<T>` alongside the guard.
pub fn write<A: Access<T>>(
    &self, _token: &mut Token<A>
) -> (RwLockWriteGuard<Storage<T, I>>, Token<T>) {
    (self.data.write(), Token::new())
}
}
@ -185,19 +313,18 @@ impl<T, I: TypedId + Copy> Registry<T, I> {
pub struct Hub {
#[cfg(not(feature = "gfx-backend-gl"))]
pub instances: Arc<Registry<InstanceHandle, InstanceId>>,
pub surfaces: Arc<Registry<SurfaceHandle, SurfaceId>>,
pub adapters: Arc<Registry<AdapterHandle, AdapterId>>,
pub devices: Arc<Registry<DeviceHandle, DeviceId>>,
pub pipeline_layouts: Arc<Registry<PipelineLayoutHandle, PipelineLayoutId>>,
pub shader_modules: Arc<Registry<ShaderModuleHandle, ShaderModuleId>>,
pub bind_group_layouts: Arc<Registry<BindGroupLayoutHandle, BindGroupLayoutId>>,
pub bind_groups: Arc<Registry<BindGroupHandle, BindGroupId>>,
pub shader_modules: Arc<Registry<ShaderModuleHandle, ShaderModuleId>>,
pub command_buffers: Arc<Registry<CommandBufferHandle, CommandBufferId>>,
pub render_pipelines: Arc<Registry<RenderPipelineHandle, RenderPipelineId>>,
pub compute_pipelines: Arc<Registry<ComputePipelineHandle, ComputePipelineId>>,
pub render_passes: Arc<Registry<RenderPassHandle, RenderPassId>>,
pub render_pipelines: Arc<Registry<RenderPipelineHandle, RenderPipelineId>>,
pub compute_passes: Arc<Registry<ComputePassHandle, ComputePassId>>,
pub compute_pipelines: Arc<Registry<ComputePipelineHandle, ComputePipelineId>>,
pub buffers: Arc<Registry<BufferHandle, BufferId>>,
pub textures: Arc<Registry<TextureHandle, TextureId>>,
pub texture_views: Arc<Registry<TextureViewHandle, TextureViewId>>,

View File

@ -1,6 +1,6 @@
use crate::{
binding_model::MAX_BIND_GROUPS,
hub::HUB,
hub::{HUB, Root, Token},
AdapterHandle,
AdapterId,
DeviceHandle,
@ -72,14 +72,14 @@ pub fn create_instance() -> ::back::Instance {
#[no_mangle]
pub extern "C" fn wgpu_create_instance() -> InstanceId {
let inst = create_instance();
HUB.instances.register_local(inst)
HUB.instances.register_local(inst, &mut Token::root())
}
#[cfg(all(feature = "local", feature = "gfx-backend-gl"))]
pub fn wgpu_create_gl_instance(windowed_context: back::glutin::WindowedContext) -> InstanceId {
let raw = back::Surface::from_window(windowed_context);
let surface = SurfaceHandle::new(raw);
HUB.surfaces.register_local(surface)
HUB.surfaces.register_local(surface, &mut Token::root())
}
#[cfg(all(feature = "window-winit", not(feature = "gfx-backend-gl")))]
@ -88,9 +88,11 @@ pub extern "C" fn wgpu_instance_create_surface_from_winit(
instance_id: InstanceId,
window: &winit::Window,
) -> SurfaceId {
let raw = HUB.instances.read()[instance_id].create_surface(window);
let mut token = Token::root();
let (instance_guard, mut token) = HUB.instances.read(&mut token);
let raw = instance_guard[instance_id].create_surface(window);
let surface = SurfaceHandle::new(raw);
HUB.surfaces.register_local(surface)
HUB.surfaces.register_local(surface, &mut token)
}
#[cfg(not(feature = "gfx-backend-gl"))]
@ -99,12 +101,16 @@ pub fn instance_create_surface_from_xlib(
instance_id: InstanceId,
display: *mut *const std::ffi::c_void,
window: u64,
token: &mut Token<Root>,
) -> SurfaceHandle {
#[cfg(not(all(unix, feature = "gfx-backend-vulkan")))]
unimplemented!();
#[cfg(all(unix, feature = "gfx-backend-vulkan"))]
SurfaceHandle::new(HUB.instances.read()[instance_id].create_surface_from_xlib(display, window))
{
let (instance_guard, _) = HUB.instances.read(token);
SurfaceHandle::new(instance_guard[instance_id].create_surface_from_xlib(display, window))
}
}
#[cfg(all(feature = "local", not(feature = "gfx-backend-gl")))]
@ -114,8 +120,9 @@ pub extern "C" fn wgpu_instance_create_surface_from_xlib(
display: *mut *const std::ffi::c_void,
window: u64,
) -> SurfaceId {
let surface = instance_create_surface_from_xlib(instance_id, display, window);
HUB.surfaces.register_local(surface)
let mut token = Token::root();
let surface = instance_create_surface_from_xlib(instance_id, display, window, &mut token);
HUB.surfaces.register_local(surface, &mut token)
}
#[cfg(not(feature = "gfx-backend-gl"))]
@ -123,15 +130,19 @@ pub extern "C" fn wgpu_instance_create_surface_from_xlib(
pub fn instance_create_surface_from_macos_layer(
instance_id: InstanceId,
layer: *mut std::ffi::c_void,
token: &mut Token<Root>,
) -> SurfaceHandle {
#[cfg(not(feature = "gfx-backend-metal"))]
unimplemented!();
#[cfg(feature = "gfx-backend-metal")]
SurfaceHandle::new(
HUB.instances.read()[instance_id]
.create_surface_from_layer(layer as *mut _, cfg!(debug_assertions)),
)
{
let (instance_guard, _) = HUB.instances.read(token);
SurfaceHandle::new(
instance_guard[instance_id]
.create_surface_from_layer(layer as *mut _, cfg!(debug_assertions)),
)
}
}
#[cfg(not(feature = "gfx-backend-gl"))]
@ -141,8 +152,9 @@ pub extern "C" fn wgpu_instance_create_surface_from_macos_layer(
instance_id: InstanceId,
layer: *mut std::ffi::c_void,
) -> SurfaceId {
let surface = instance_create_surface_from_macos_layer(instance_id, layer);
HUB.surfaces.register_local(surface)
let mut token = Token::root();
let surface = instance_create_surface_from_macos_layer(instance_id, layer, &mut token);
HUB.surfaces.register_local(surface, &mut token)
}
#[cfg(not(feature = "gfx-backend-gl"))]
@ -151,7 +163,10 @@ pub fn instance_create_surface_from_windows_hwnd(
instance_id: InstanceId,
hinstance: *mut std::ffi::c_void,
hwnd: *mut std::ffi::c_void,
token: &mut Token<Root>,
) -> SurfaceHandle {
let (instance_guard, _) = HUB.instances.read(token);
#[cfg(not(any(
feature = "gfx-backend-dx11",
feature = "gfx-backend-dx12",
@ -160,10 +175,10 @@ pub fn instance_create_surface_from_windows_hwnd(
let raw = unimplemented!();
#[cfg(any(feature = "gfx-backend-dx11", feature = "gfx-backend-dx12"))]
let raw = HUB.instances.read()[instance_id].create_surface_from_hwnd(hwnd);
let raw = instance_guard[instance_id].create_surface_from_hwnd(hwnd);
#[cfg(all(target_os = "windows", feature = "gfx-backend-vulkan"))]
let raw = HUB.instances.read()[instance_id].create_surface_from_hwnd(hinstance, hwnd);
let raw = instance_guard[instance_id].create_surface_from_hwnd(hinstance, hwnd);
#[allow(unreachable_code)]
SurfaceHandle::new(raw)
@ -177,8 +192,9 @@ pub extern "C" fn wgpu_instance_create_surface_from_windows_hwnd(
hinstance: *mut std::ffi::c_void,
hwnd: *mut std::ffi::c_void,
) -> SurfaceId {
let surface = instance_create_surface_from_windows_hwnd(instance_id, hinstance, hwnd);
HUB.surfaces.register_local(surface)
let mut token = Token::root();
let surface = instance_create_surface_from_windows_hwnd(instance_id, hinstance, hwnd, &mut token);
HUB.surfaces.register_local(surface, &mut token)
}
#[cfg(all(feature = "local", feature = "gfx-backend-gl"))]
@ -186,18 +202,20 @@ pub fn wgpu_instance_get_gl_surface(instance_id: InstanceId) -> SurfaceId {
instance_id
}
pub fn instance_get_adapter(instance_id: InstanceId, desc: &AdapterDescriptor) -> AdapterHandle {
pub fn instance_get_adapter(
instance_id: InstanceId,
desc: &AdapterDescriptor,
token: &mut Token<Root>,
) -> AdapterHandle {
#[cfg(not(feature = "gfx-backend-gl"))]
let adapters = {
let instance_guard = HUB.instances.read();
let instance = &instance_guard[instance_id];
instance.enumerate_adapters()
let (instance_guard, _) = HUB.instances.read(token);
instance_guard[instance_id].enumerate_adapters()
};
#[cfg(feature = "gfx-backend-gl")]
let adapters = {
let surface_guard = HUB.surfaces.read();
let surface = &surface_guard[instance_id];
surface.raw.enumerate_adapters()
let (surface_guard, _) = HUB.surfaces.read(token);
surface_guard[instance_id].raw.enumerate_adapters()
};
let (mut integrated_first, mut discrete_first, mut discrete_last, mut alternative) =
@ -242,7 +260,8 @@ pub extern "C" fn wgpu_instance_get_adapter(
instance_id: InstanceId,
desc: &AdapterDescriptor,
) -> AdapterId {
let adapter = instance_get_adapter(instance_id, desc);
let mut token = Token::root();
let adapter = instance_get_adapter(instance_id, desc, &mut token);
let limits = adapter.physical_device.limits();
info!("Adapter {:?}", adapter.info);
@ -256,11 +275,15 @@ pub extern "C" fn wgpu_instance_get_adapter(
"Adapter uniform buffer offset alignment not compatible with WGPU"
);
HUB.adapters.register_local(adapter)
HUB.adapters.register_local(adapter, &mut token)
}
pub fn adapter_create_device(adapter_id: AdapterId, _desc: &DeviceDescriptor) -> DeviceHandle {
let adapter_guard = HUB.adapters.read();
pub fn adapter_create_device(
adapter_id: AdapterId,
_desc: &DeviceDescriptor,
token: &mut Token<Root>,
) -> DeviceHandle {
let (adapter_guard, _) = HUB.adapters.read(token);
let adapter = &adapter_guard[adapter_id];
let (raw, queue_group) = adapter.open_with::<_, hal::General>(1, |_qf| true).unwrap();
let mem_props = adapter.physical_device.memory_properties();
@ -274,6 +297,7 @@ pub extern "C" fn wgpu_adapter_request_device(
adapter_id: AdapterId,
desc: &DeviceDescriptor,
) -> DeviceId {
let device = adapter_create_device(adapter_id, desc);
HUB.devices.register_local(device)
let mut token = Token::root();
let device = adapter_create_device(adapter_id, desc, &mut token);
HUB.devices.register_local(device, &mut token)
}

View File

@ -35,7 +35,7 @@ pub use self::binding_model::*;
pub use self::command::*;
pub use self::device::*;
#[cfg(not(feature = "local"))]
pub use self::hub::{IdentityManager, Registry, HUB};
pub use self::hub::{Access, IdentityManager, Registry, Token, HUB};
pub use self::instance::*;
pub use self::pipeline::*;
pub use self::resource::*;

View File

@ -1,7 +1,7 @@
use crate::{
conv,
device::all_image_stages,
hub::HUB,
hub::{HUB, Token},
resource,
DeviceId,
Extent3d,
@ -116,8 +116,9 @@ pub struct SwapChainOutput {
#[no_mangle]
pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -> SwapChainOutput {
let mut token = Token::root();
let (image_index, device_id, descriptor) = {
let mut surface_guard = HUB.surfaces.write();
let (mut surface_guard, _) = HUB.surfaces.write(&mut token);
let swap_chain = surface_guard[swap_chain_id].swap_chain.as_mut().unwrap();
let result = unsafe {
swap_chain
@ -138,12 +139,12 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -
use crate::device::{device_create_swap_chain, swap_chain_populate_textures};
if image_index.is_none() {
warn!("acquire_image failed, re-creating");
let textures = device_create_swap_chain(device_id, swap_chain_id, &descriptor);
swap_chain_populate_textures(swap_chain_id, textures);
let textures = device_create_swap_chain(device_id, swap_chain_id, &descriptor, &mut token);
swap_chain_populate_textures(swap_chain_id, textures, &mut token);
}
}
let mut surface_guard = HUB.surfaces.write();
let (mut surface_guard, mut token) = HUB.surfaces.write(&mut token);
let swap_chain = surface_guard[swap_chain_id].swap_chain.as_mut().unwrap();
let image_index = match image_index {
@ -162,7 +163,7 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -
},
};
let device_guard = HUB.devices.read();
let (device_guard, mut token) = HUB.devices.read(&mut token);
let device = &device_guard[device_id];
assert_ne!(
@ -187,7 +188,8 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -
mem::swap(&mut frame.sem_available, &mut swap_chain.sem_available);
frame.need_waiting.store(true, Ordering::Release);
let frame_epoch = HUB.textures.read()[frame.texture_id.value]
let (texture_guard, _) = HUB.textures.read(&mut token);
let frame_epoch = texture_guard[frame.texture_id.value]
.placement
.as_swap_chain()
.bump_epoch();
@ -206,7 +208,8 @@ pub extern "C" fn wgpu_swap_chain_get_next_texture(swap_chain_id: SwapChainId) -
#[no_mangle]
pub extern "C" fn wgpu_swap_chain_present(swap_chain_id: SwapChainId) {
let mut surface_guard = HUB.surfaces.write();
let mut token = Token::root();
let (mut surface_guard, mut token) = HUB.surfaces.write(&mut token);
let swap_chain = surface_guard[swap_chain_id].swap_chain.as_mut().unwrap();
let image_index = swap_chain.acquired.remove(0);
@ -223,10 +226,10 @@ pub extern "C" fn wgpu_swap_chain_present(swap_chain_id: SwapChainId) {
image_index
);
let mut device_guard = HUB.devices.write();
let (mut device_guard, mut token) = HUB.devices.write(&mut token);
let device = &mut device_guard[swap_chain.device_id.value];
let texture_guard = HUB.textures.read();
let (texture_guard, _) = HUB.textures.read(&mut token);
let texture = &texture_guard[frame.texture_id.value];
texture.placement.as_swap_chain().bump_epoch();

View File

@ -20,22 +20,23 @@ enum ControlFlow {
}
fn process(message: GlobalMessage) -> ControlFlow {
let mut token = &mut wgn::Token::root();
match message {
GlobalMessage::Instance(msg) => match msg {
InstanceMessage::Create(instance_id) => {
let instance = wgn::create_instance();
wgn::HUB.instances.register(instance_id, instance);
wgn::HUB.instances.register(instance_id, instance, &mut token);
}
InstanceMessage::InstanceGetAdapter(instance_id, ref desc, id) => {
let adapter = wgn::instance_get_adapter(instance_id, desc);
wgn::HUB.adapters.register(id, adapter);
let adapter = wgn::instance_get_adapter(instance_id, desc, &mut token);
wgn::HUB.adapters.register(id, adapter, &mut token);
}
InstanceMessage::AdapterCreateDevice(adapter_id, ref desc, id) => {
let device = wgn::adapter_create_device(adapter_id, desc);
wgn::HUB.devices.register(id, device);
let device = wgn::adapter_create_device(adapter_id, desc, &mut token);
wgn::HUB.devices.register(id, device, &mut token);
}
InstanceMessage::Destroy(instance_id) => {
wgn::HUB.instances.unregister(instance_id);
wgn::HUB.instances.unregister(instance_id, &mut token);
}
},
GlobalMessage::Terminate => return ControlFlow::Terminate,