Resource destruction refactoring, hook up to rust

This commit is contained in:
Dzmitry Malyshau 2019-02-21 08:42:57 -05:00
parent a7476ee69a
commit 855bcfe176
8 changed files with 170 additions and 135 deletions

View File

@ -92,7 +92,6 @@ fn main() {
cpass.set_pipeline(&compute_pipeline);
cpass.set_bind_group(0, &bind_group);
cpass.dispatch(numbers.len() as u32, 1, 1);
cpass.end_pass();
}
encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);

View File

@ -108,7 +108,6 @@ fn main() {
});
rpass.set_pipeline(&render_pipeline);
rpass.draw(0..3, 0..1);
rpass.end_pass();
}
device

View File

@ -318,7 +318,6 @@ impl framework::Example for Cube {
rpass.set_index_buffer(&self.index_buf, 0);
rpass.set_vertex_buffers(&[(&self.vertex_buf, 0)]);
rpass.draw_indexed(0 .. self.index_count as u32, 0, 0..1);
rpass.end_pass();
}
device

View File

@ -34,9 +34,7 @@ impl<B: hal::Backend> ComputePass<B> {
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: ComputePassId) -> CommandBufferId {
#[cfg(feature = "local")]
HUB.compute_passes.unregister(pass_id);
let pass = HUB.compute_passes.write().take(pass_id);
let pass = HUB.compute_passes.unregister(pass_id);
HUB.command_buffers
.write()

View File

@ -34,9 +34,7 @@ impl<B: hal::Backend> RenderPass<B> {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBufferId {
#[cfg(feature = "local")]
HUB.render_passes.unregister(pass_id);
let mut pass = HUB.render_passes.write().take(pass_id);
let mut pass = HUB.render_passes.unregister(pass_id);
unsafe {
pass.raw.end_render_pass();
}

View File

@ -1,5 +1,5 @@
use crate::{binding_model, command, conv, pipeline, resource, swap_chain};
use crate::hub::{HUB, Storage};
use crate::hub::HUB;
use crate::track::{BufferTracker, TextureTracker, TrackPermit};
use crate::{
LifeGuard, RefCount, Stored, SubmissionIndex, WeaklyStored,
@ -67,11 +67,13 @@ impl Eq for FramebufferKey {}
/// Identifies a resource that has been scheduled for deferred destruction.
/// Only the ID is stored here; the owned resource is fetched from the HUB
/// (see `triage_referenced`) once its reference count allows it.
enum ResourceId {
Buffer(BufferId),
Texture(TextureId),
TextureView(TextureViewId),
}
/// An owned backend resource that has been unregistered from the HUB and is
/// being kept alive only until the GPU submission that last used it completes,
/// at which point `cleanup` destroys the raw handle.
enum Resource<B: hal::Backend> {
Buffer(resource::Buffer<B>),
Texture(resource::Texture<B>),
TextureView(resource::TextureView<B>),
}
struct ActiveSubmission<B: hal::Backend> {
@ -101,45 +103,6 @@ impl<B: hal::Backend> DestroyedResources<B> {
.push((resource_id, life_guard.ref_count.clone()));
}
fn triage_referenced(
&mut self,
buffer_guard: &mut Storage<resource::Buffer<B>>,
texture_guard: &mut Storage<resource::Texture<B>>,
) {
for i in (0..self.referenced.len()).rev() {
// one in resource itself, and one here in this list
let num_refs = self.referenced[i].1.load();
if num_refs <= 2 {
assert_eq!(num_refs, 2);
let resource_id = self.referenced.swap_remove(i).0;
let (submit_index, resource) = match resource_id {
ResourceId::Buffer(id) => {
#[cfg(feature = "local")]
HUB.buffers.unregister(id);
let buf = buffer_guard.take(id);
let si = buf.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::Buffer(buf))
}
ResourceId::Texture(id) => {
#[cfg(feature = "local")]
HUB.textures.unregister(id);
let tex = texture_guard.take(id);
let si = tex.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::Texture(tex))
}
};
match self
.active
.iter_mut()
.find(|a| a.index == submit_index)
{
Some(a) => a.resources.push(resource),
None => self.free.push(resource),
}
}
}
}
/// Returns the last submission index that is done.
fn cleanup(&mut self, raw: &B::Device) -> SubmissionIndex {
let mut last_done = 0;
@ -159,12 +122,15 @@ impl<B: hal::Backend> DestroyedResources<B> {
for resource in self.free.drain(..) {
match resource {
Resource::Buffer(buf) => {
unsafe { raw.destroy_buffer(buf.raw) };
}
Resource::Texture(tex) => {
unsafe { raw.destroy_image(tex.raw) };
}
Resource::Buffer(buf) => unsafe {
raw.destroy_buffer(buf.raw)
},
Resource::Texture(tex) => unsafe {
raw.destroy_image(tex.raw)
},
Resource::TextureView(view) => unsafe {
raw.destroy_image_view(view.raw)
},
}
}
@ -172,6 +138,44 @@ impl<B: hal::Backend> DestroyedResources<B> {
}
}
impl DestroyedResources<back::Backend> {
/// Scans the `referenced` list for resources whose only remaining strong
/// references are (1) the one held by the resource itself and (2) the one
/// held by this list. Each such resource is unregistered from the HUB and
/// moved either into the `ActiveSubmission` matching the submission index
/// it was last used in, or — if no such submission is still tracked —
/// straight into `free` for destruction on the next `cleanup`.
fn triage_referenced(&mut self) {
// Iterate in reverse so that swap_remove below never disturbs an
// index we have not visited yet.
for i in (0..self.referenced.len()).rev() {
// one in resource itself, and one here in this list
let num_refs = self.referenced[i].1.load();
if num_refs <= 2 {
// A count below 2 would mean a RefCount was dropped without
// going through this list — that is a bookkeeping bug.
assert_eq!(num_refs, 2);
let resource_id = self.referenced.swap_remove(i).0;
// Take ownership of the resource from the HUB and record the
// index of the last submission that used it (Acquire pairs
// with the Release store done at submit time).
let (submit_index, resource) = match resource_id {
ResourceId::Buffer(id) => {
let buf = HUB.buffers.unregister(id);
let si = buf.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::Buffer(buf))
}
ResourceId::Texture(id) => {
let tex = HUB.textures.unregister(id);
let si = tex.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::Texture(tex))
}
ResourceId::TextureView(id) => {
let view = HUB.texture_views.unregister(id);
let si = view.life_guard.submission_index.load(Ordering::Acquire);
(si, Resource::TextureView(view))
}
};
// Park the resource with its submission if that submission is
// still in flight; otherwise it is already safe to free.
match self
.active
.iter_mut()
.find(|a| a.index == submit_index)
{
Some(a) => a.resources.push(resource),
None => self.free.push(resource),
}
}
}
}
}
pub struct Device<B: hal::Backend> {
pub(crate) raw: B::Device,
@ -579,8 +583,19 @@ pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
}
#[no_mangle]
pub extern "C" fn wgpu_texture_view_destroy(_texture_view_id: TextureViewId) {
unimplemented!()
/// Schedules a texture view for deferred destruction.
///
/// The view is not destroyed immediately: it is added to the owning device's
/// `DestroyedResources` list, which keeps it alive until the last GPU
/// submission that referenced it has completed.
pub extern "C" fn wgpu_texture_view_destroy(texture_view_id: TextureViewId) {
let texture_view_guard = HUB.texture_views.read();
let view = texture_view_guard.get(texture_view_id);
// Walk view -> texture -> device to find the device whose destruction
// list should own this view.
// NOTE(review): this holds the texture_views read lock while also taking
// the textures, devices, and `destroyed` locks — confirm this matches the
// lock acquisition order used elsewhere (e.g. wgpu_texture_destroy) to
// avoid a lock-order inversion.
let device_id = HUB.textures
.read()
.get(view.texture_id.value)
.device_id.value;
HUB.devices
.read()
.get(device_id)
.destroyed
.lock()
.add(ResourceId::TextureView(texture_view_id), &view.life_guard);
}
@ -878,77 +893,80 @@ pub extern "C" fn wgpu_queue_submit(
) {
let mut device_guard = HUB.devices.write();
let device = device_guard.get_mut(queue_id);
let mut buffer_tracker = device.buffer_tracker.lock();
let mut texture_tracker = device.texture_tracker.lock();
let mut command_buffer_guard = HUB.command_buffers.write();
let mut swap_chain_links = Vec::new();
let command_buffer_ids =
unsafe { slice::from_raw_parts(command_buffer_ptr, command_buffer_count) };
let mut buffer_guard = HUB.buffers.write();
let mut texture_guard = HUB.textures.write();
let old_submit_index = device
.life_guard
.submission_index
.fetch_add(1, Ordering::Relaxed);
let mut swap_chain_links = Vec::new();
//TODO: if multiple command buffers are submitted, we can re-use the last
// native command buffer of the previous chain instead of always creating
// a temporary one, since the chains are not finished.
{
let mut command_buffer_guard = HUB.command_buffers.write();
let buffer_guard = HUB.buffers.read();
let texture_guard = HUB.textures.read();
let mut buffer_tracker = device.buffer_tracker.lock();
let mut texture_tracker = device.texture_tracker.lock();
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
let comb = command_buffer_guard.get_mut(cmb_id);
swap_chain_links.extend(comb.swap_chain_links.drain(..));
// update submission IDs
comb.life_guard.submission_index
.store(old_submit_index, Ordering::Release);
for id in comb.buffer_tracker.used() {
buffer_guard
.get(id)
.life_guard
.submission_index
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
let comb = command_buffer_guard.get_mut(cmb_id);
swap_chain_links.extend(comb.swap_chain_links.drain(..));
// update submission IDs
comb.life_guard.submission_index
.store(old_submit_index, Ordering::Release);
}
for id in comb.texture_tracker.used() {
texture_guard
.get(id)
.life_guard
.submission_index
.store(old_submit_index, Ordering::Release);
}
for id in comb.buffer_tracker.used() {
buffer_guard
.get(id)
.life_guard
.submission_index
.store(old_submit_index, Ordering::Release);
}
for id in comb.texture_tracker.used() {
texture_guard
.get(id)
.life_guard
.submission_index
.store(old_submit_index, Ordering::Release);
}
// execute resource transitions
let mut transit = device.com_allocator.extend(comb);
unsafe {
transit.begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
hal::command::CommandBufferInheritanceInfo::default(),
// execute resource transitions
let mut transit = device.com_allocator.extend(comb);
unsafe {
transit.begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
hal::command::CommandBufferInheritanceInfo::default(),
);
}
//TODO: fix the consume
command::CommandBuffer::insert_barriers(
&mut transit,
buffer_tracker.consume_by_replace(&comb.buffer_tracker),
texture_tracker.consume_by_replace(&comb.texture_tracker),
&*buffer_guard,
&*texture_guard,
);
}
//TODO: fix the consume
command::CommandBuffer::insert_barriers(
&mut transit,
buffer_tracker.consume_by_replace(&comb.buffer_tracker),
texture_tracker.consume_by_replace(&comb.texture_tracker),
&*buffer_guard,
&*texture_guard,
);
unsafe {
transit.finish();
}
comb.raw.insert(0, transit);
unsafe {
comb.raw.last_mut().unwrap().finish();
unsafe {
transit.finish();
}
comb.raw.insert(0, transit);
unsafe {
comb.raw.last_mut().unwrap().finish();
}
}
}
// now prepare the GPU submission
let fence = device.raw.create_fence(false).unwrap();
{
let command_buffer_guard = HUB.command_buffers.read();
let swap_chain_guard = HUB.swap_chains.read();
let wait_semaphores = swap_chain_links
.into_iter()
.map(|link| {
@ -959,6 +977,7 @@ pub extern "C" fn wgpu_queue_submit(
.sem_available;
(sem, hal::pso::PipelineStage::COLOR_ATTACHMENT_OUTPUT)
});
let submission =
hal::queue::Submission::<_, _, &[<back::Backend as hal::Backend>::Semaphore]> {
//TODO: may `OneShot` be enough?
@ -968,6 +987,7 @@ pub extern "C" fn wgpu_queue_submit(
wait_semaphores,
signal_semaphores: &[], //TODO: signal `sem_present`?
};
unsafe {
device.queue_group.queues[0]
.as_raw_mut()
@ -977,7 +997,7 @@ pub extern "C" fn wgpu_queue_submit(
let last_done = {
let mut destroyed = device.destroyed.lock();
destroyed.triage_referenced(&mut *buffer_guard, &mut *texture_guard);
destroyed.triage_referenced();
let last_done = destroyed.cleanup(&device.raw);
destroyed.active.push(ActiveSubmission {
@ -995,9 +1015,7 @@ pub extern "C" fn wgpu_queue_submit(
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
#[cfg(feature = "local")]
HUB.command_buffers.unregister(cmb_id);
let cmd_buf = command_buffer_guard.take(cmb_id);
let cmd_buf = HUB.command_buffers.unregister(cmb_id);
device.com_allocator.after_submit(cmd_buf);
}
}

View File

@ -58,9 +58,6 @@ impl<T> Storage<T> {
pub fn get_mut(&mut self, id: Id) -> &mut T {
self.map.get_mut(&id).unwrap()
}
pub fn take(&mut self, id: Id) -> T {
self.map.remove(&id).unwrap()
}
}
pub struct Registry<T> {
@ -94,16 +91,19 @@ impl<T> ops::DerefMut for Registry<T> {
}
}
#[cfg(feature = "local")]
impl<T> Registry<T> {
#[cfg(feature = "local")]
pub fn register(&self, value: T) -> Id {
let id = self.identity.lock().alloc();
let old = self.data.write().map.insert(id, value);
assert!(old.is_none());
id
}
pub fn unregister(&self, id: Id) {
pub fn unregister(&self, id: Id) -> T {
#[cfg(feature = "local")]
self.identity.lock().free(id);
self.data.write().map.remove(&id).unwrap()
}
}

View File

@ -71,20 +71,6 @@ pub struct SwapChain {
id: wgn::SwapChainId,
}
/// A resource that can be bound in a bind group: a sub-range of a buffer,
/// a sampler, or a texture view. Borrows the resource for the lifetime of
/// the binding description.
pub enum BindingResource<'a> {
Buffer {
buffer: &'a Buffer,
range: Range<u32>,
},
Sampler(&'a Sampler),
TextureView(&'a TextureView),
}
/// A single bind-group entry: the shader binding slot number and the
/// resource bound to it.
pub struct Binding<'a> {
pub binding: u32,
pub resource: BindingResource<'a>,
}
pub struct BindGroupLayout {
id: wgn::BindGroupLayoutId,
}
@ -132,6 +118,21 @@ pub struct Queue<'a> {
temp: &'a mut Temp,
}
/// A resource that can be bound in a bind group: a sub-range of a buffer,
/// a sampler, or a texture view. Borrows the resource for the lifetime of
/// the binding description.
pub enum BindingResource<'a> {
Buffer {
buffer: &'a Buffer,
range: Range<u32>,
},
Sampler(&'a Sampler),
TextureView(&'a TextureView),
}
/// A single bind-group entry: the shader binding slot number and the
/// resource bound to it.
pub struct Binding<'a> {
pub binding: u32,
pub resource: BindingResource<'a>,
}
pub struct BindGroupLayoutDescriptor<'a> {
pub bindings: &'a [BindGroupLayoutBinding],
}
@ -222,6 +223,7 @@ impl<'a> TextureCopyView<'a> {
}
}
impl Instance {
pub fn new() -> Self {
Instance {
@ -438,6 +440,12 @@ impl Buffer {
}
}
// Dropping the handle schedules the native buffer for deferred destruction
// on the owning device (it is freed once the GPU is done with it).
impl Drop for Buffer {
fn drop(&mut self) {
wgn::wgpu_buffer_destroy(self.id);
}
}
impl Texture {
pub fn create_view(&self, desc: &TextureViewDescriptor) -> TextureView {
TextureView {
@ -452,6 +460,18 @@ impl Texture {
}
}
// Dropping the handle schedules the native texture for deferred destruction
// on the owning device (it is freed once the GPU is done with it).
impl Drop for Texture {
fn drop(&mut self) {
wgn::wgpu_texture_destroy(self.id);
}
}
// Dropping the handle schedules the native texture view for deferred
// destruction on the owning device.
impl Drop for TextureView {
fn drop(&mut self) {
wgn::wgpu_texture_view_destroy(self.id);
}
}
impl CommandEncoder {
pub fn finish(self) -> CommandBuffer {
CommandBuffer {
@ -568,10 +588,6 @@ impl CommandEncoder {
}
impl<'a> RenderPass<'a> {
pub fn end_pass(self) {
wgn::wgpu_render_pass_end_pass(self.id);
}
pub fn set_bind_group(&mut self, index: u32, bind_group: &BindGroup) {
wgn::wgpu_render_pass_set_bind_group(self.id, index, bind_group.id);
}
@ -621,11 +637,13 @@ impl<'a> RenderPass<'a> {
}
}
impl<'a> ComputePass<'a> {
pub fn end_pass(self) {
wgn::wgpu_compute_pass_end_pass(self.id);
// Ending the pass is tied to scope exit: dropping the RenderPass handle
// ends the native render pass, replacing the previous explicit `end_pass`.
impl<'a> Drop for RenderPass<'a> {
fn drop(&mut self) {
wgn::wgpu_render_pass_end_pass(self.id);
}
}
impl<'a> ComputePass<'a> {
pub fn set_bind_group(&mut self, index: u32, bind_group: &BindGroup) {
wgn::wgpu_compute_pass_set_bind_group(self.id, index, bind_group.id);
}
@ -639,6 +657,12 @@ impl<'a> ComputePass<'a> {
}
}
// Ending the pass is tied to scope exit: dropping the ComputePass handle
// ends the native compute pass, replacing the previous explicit `end_pass`.
impl<'a> Drop for ComputePass<'a> {
fn drop(&mut self) {
wgn::wgpu_compute_pass_end_pass(self.id);
}
}
impl<'a> Queue<'a> {
pub fn submit(&mut self, command_buffers: &[CommandBuffer]) {
self.temp.command_buffers.clear();