427: Rewrite of the resource lifetime tracking r=crossed-fingers a=kvark

Addresses this TODO item in `triage_referenced()`:
> //TODO: lock less, if possible

Now, also fixes #428 

Pros:
  - only the storages that actually need cleanup get locked, and only for the duration of that cleanup (see the sketch after the Cons list)
  - less run-time branching and more predefined code paths, which may lead to better cache utilization (thinking of both the instruction and data caches here)
  - more consistent use of the `Stored` type

Cons:
  - a bit of verbosity / code duplication in `triage_suspected()`: in particular, the code that finds where to register an unreferenced resource used to be unified for all resource types and is now repeated per type.
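
For a concrete picture, here is one arm of the new `triage_suspected()` in condensed form (taken from the diff below and trimmed — the `hub`/`trackers`/`token` plumbing is as in `wgpu-core`; the same block is repeated per resource type against its own storage, which is exactly the duplication mentioned above):

```rust
if !self.suspected_resources.bind_groups.is_empty() {
    let mut trackers = trackers.lock();
    // Only the bind group storage is locked, and only while its suspects drain.
    let (mut guard, _) = hub.bind_groups.write(token);
    for id in self.suspected_resources.bind_groups.drain(..) {
        if trackers.bind_groups.remove_abandoned(id) {
            // The device tracker held the last reference: recycle the ID
            // and schedule the raw resource for deferred destruction with
            // the submission that last used it.
            hub.bind_groups.identity.free(id);
            let res = guard.remove(id).unwrap();
            // ... register `res.raw` with the right ActiveSubmission ...
        }
    }
} // both locks are released before the next resource type is triaged
```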

---
@grovesNL this should be reviewable on a commit basis. The high-level breakdown of changes is:
  1. Switch the lifetime tracker from an enum of resource types to a structure-of-arrays layout.
  2. Rename the involved structures to better reflect what they do (the old `PendingResources` was bad)
  3. Separate lifetime tracking into a sub-module
  4. Make the `RefCount` in objects optional, with `Some` meaning "the user still needs this resource" (see the sketch right after this list).
  5. Rewrite the first stage of lifetime tracking: instead of permanently staging resources the user no longer needs and holding strong refcounts to them, we now populate the suspect list only temporarily, with anything whose refcount just got reduced. This means less overhead for `maintain()`, at an increased risk of leaking resources (depending on our code quality).
  6. Consequently, the device tracker becomes the main (and last) owner of all the resources.
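
As a minimal illustration of point 4, here is the new `LifeGuard` as it appears in the diff below, with a couple of explanatory comments added (`RefCount` and `SubmissionIndex` are the existing `wgpu-core` types):

```rust
#[derive(Debug)]
struct LifeGuard {
    /// `Some` while the user still holds a handle to the resource.
    /// Once the user drops it, this becomes `None` and the device
    /// tracker is left as the last owner (point 6).
    ref_count: Option<RefCount>,
    submission_index: AtomicUsize,
}

impl LifeGuard {
    /// Clone the refcount for storing alongside an ID in `Stored<T>`;
    /// panics if the user has already given the resource up.
    fn add_ref(&self) -> RefCount {
        self.ref_count.clone().unwrap()
    }

    /// Record the submission that uses the resource, and report
    /// whether the user still needs it.
    fn use_at(&self, submit_index: SubmissionIndex) -> bool {
        self.submission_index.store(submit_index, Ordering::Release);
        self.ref_count.is_some()
    }
}
```

Call sites then switch from `life_guard.ref_count.clone()` to `life_guard.add_ref()` when building a `Stored` value, which is the mechanical change visible throughout the render pass diff.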

Overall, it's a major and risky change. I tested on `vange-rs` (which I consider to be the most complex wgpu-rs app) with Vulkan validation enabled, and it all seems to be working well now.

Co-authored-by: Dzmitry Malyshau <kvarkus@gmail.com>
bors[bot] 2020-01-08 23:26:44 +00:00 committed by GitHub
commit ba0acc94a1
12 changed files with 852 additions and 595 deletions

View File

@@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Generated with cbindgen:0.12.0 */
/* Generated with cbindgen:0.12.1 */
/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen.
* To generate this file:
@@ -278,14 +278,14 @@ typedef void (*WGPUBufferMapReadCallback)(WGPUBufferMapAsyncStatus status, const
typedef void (*WGPUBufferMapWriteCallback)(WGPUBufferMapAsyncStatus status, uint8_t *data, uint8_t *userdata);
typedef uint64_t WGPUId_ComputePass_Dummy;
typedef WGPUId_ComputePass_Dummy WGPUComputePassId;
typedef uint64_t WGPUId_CommandBuffer_Dummy;
typedef WGPUId_CommandBuffer_Dummy WGPUCommandBufferId;
typedef uint64_t WGPUId_ComputePass_Dummy;
typedef WGPUId_ComputePass_Dummy WGPUComputePassId;
typedef WGPUCommandBufferId WGPUCommandEncoderId;
typedef struct {
@@ -686,6 +686,8 @@ typedef void (*WGPUBufferMapWriteCallback)(WGPUBufferMapAsyncStatus status, uint
void wgpu_buffer_unmap(WGPUBufferId buffer_id);
void wgpu_command_buffer_destroy(WGPUCommandBufferId command_buffer_id);
WGPUComputePassId wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId encoder_id,
const WGPUComputePassDescriptor *desc);
@@ -714,6 +716,8 @@ void wgpu_command_encoder_copy_texture_to_texture(WGPUCommandEncoderId command_e
const WGPUTextureCopyView *destination,
WGPUExtent3d copy_size);
void wgpu_command_encoder_destroy(WGPUCommandEncoderId command_encoder_id);
WGPUCommandBufferId wgpu_command_encoder_finish(WGPUCommandEncoderId encoder_id,
const WGPUCommandBufferDescriptor *desc);

View File

@@ -123,7 +123,7 @@ pub struct BindGroup<B: hal::Backend> {
impl<B: hal::Backend> Borrow<RefCount> for BindGroup<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
self.life_guard.ref_count.as_ref().unwrap()
}
}

View File

@@ -140,6 +140,16 @@ impl<B: hal::Backend> CommandAllocator<B> {
pool.available.pop().unwrap()
}
pub fn discard(&self, mut cmd_buf: CommandBuffer<B>) {
cmd_buf.trackers.clear();
self.inner
.lock()
.pools
.get_mut(&cmd_buf.recorded_thread_id)
.unwrap()
.recycle(cmd_buf);
}
pub fn after_submit(&self, mut cmd_buf: CommandBuffer<B>, submit_index: SubmissionIndex) {
cmd_buf.trackers.clear();
cmd_buf

View File

@@ -88,7 +88,7 @@ impl BindGroupEntry {
layout_id: bind_group.layout_id,
group_id: Stored {
value: bind_group_id,
ref_count: bind_group.life_guard.ref_count.clone(),
ref_count: bind_group.life_guard.add_ref(),
},
});
//TODO: validate the count of dynamic offsets to match the layout

View File

@@ -252,7 +252,11 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
);
const MAX_TOTAL_ATTACHMENTS: usize = 10;
type OutputAttachment<'a> = (TextureId, &'a hal::image::SubresourceRange, Option<TextureUsage>);
type OutputAttachment<'a> = (
&'a Stored<TextureId>,
&'a hal::image::SubresourceRange,
Option<TextureUsage>,
);
let mut output_attachments = ArrayVec::<[OutputAttachment; MAX_TOTAL_ATTACHMENTS]>::new();
log::trace!(
@@ -271,8 +275,8 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
} else {
extent = Some(view.extent);
}
let texture_id = match view.inner {
TextureViewInner::Native { ref source_id, .. } => source_id.value,
let source_id = match view.inner {
TextureViewInner::Native { ref source_id, .. } => source_id,
TextureViewInner::SwapChain { .. } => {
panic!("Unexpected depth/stencil use of swapchain image!")
}
@@ -280,10 +284,10 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
// Using render pass for transition.
let consistent_usage = cmb.trackers.textures.query(
texture_id,
source_id.value,
view.range.clone(),
);
output_attachments.push((texture_id, &view.range, consistent_usage));
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(
@@ -323,7 +327,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
);
let first_use = cmb.trackers.views.init(
at.attachment,
view.life_guard.ref_count.clone(),
view.life_guard.add_ref(),
&(),
).is_some();
@@ -333,7 +337,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
source_id.value,
view.range.clone(),
);
output_attachments.push((source_id.value, &view.range, consistent_usage));
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(usage, hal::format::Aspects::COLOR).1,
@@ -348,7 +352,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: at.attachment,
ref_count: view.life_guard.ref_count.clone(),
ref_count: view.life_guard.add_ref(),
});
}
@@ -383,7 +387,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
);
let first_use = cmb.trackers.views.init(
resolve_target,
view.life_guard.ref_count.clone(),
view.life_guard.add_ref(),
&(),
).is_some();
@@ -393,7 +397,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
source_id.value,
view.range.clone(),
);
output_attachments.push((source_id.value, &view.range, consistent_usage));
output_attachments.push((source_id, &view.range, consistent_usage));
let old_layout = match consistent_usage {
Some(usage) => conv::map_texture_state(usage, hal::format::Aspects::COLOR).1,
@@ -408,7 +412,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
assert!(used_swap_chain_image.is_none());
used_swap_chain_image = Some(Stored {
value: resolve_target,
ref_count: view.life_guard.ref_count.clone(),
ref_count: view.life_guard.add_ref(),
});
}
@@ -442,26 +446,26 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
};
let mut trackers = TrackerSet::new(B::VARIANT);
for (texture_id, view_range, consistent_usage) in output_attachments {
let texture = &texture_guard[texture_id];
for (source_id, view_range, consistent_usage) in output_attachments {
let texture = &texture_guard[source_id.value];
assert!(texture.usage.contains(TextureUsage::OUTPUT_ATTACHMENT));
let usage = consistent_usage.unwrap_or(TextureUsage::OUTPUT_ATTACHMENT);
match trackers.textures.init(
texture_id,
texture.life_guard.ref_count.clone(),
source_id.value,
source_id.ref_count.clone(),
&texture.full_range,
) {
Some(mut init) => init.set(view_range.clone(), usage),
None => panic!("Your texture {:?} is in another attachment!", texture_id),
None => panic!("Your texture {:?} is in another attachment!", source_id.value),
};
if consistent_usage.is_some() {
// If we expect the texture to be transited to a new state by the
// render pass configuration, make the tracker aware of that.
let _ = trackers.textures.change_replace(
texture_id,
&texture.life_guard.ref_count,
source_id.value,
&source_id.ref_count,
view_range.clone(),
TextureUsage::OUTPUT_ATTACHMENT,
&texture.full_range,
@@ -674,7 +678,7 @@ impl<F: IdentityFilter<RenderPassId>> Global<F> {
current_comb,
Stored {
value: encoder_id,
ref_count: cmb.life_guard.ref_count.clone(),
ref_count: cmb.life_guard.add_ref(),
},
context,
trackers,
@@ -703,7 +707,7 @@ impl<F: IdentityFilter<ComputePassId>> Global<F> {
let trackers = mem::replace(&mut cmb.trackers, TrackerSet::new(encoder_id.backend()));
let stored = Stored {
value: encoder_id,
ref_count: cmb.life_guard.ref_count.clone(),
ref_count: cmb.life_guard.add_ref(),
};
let pass = ComputePass::new(raw, stored, trackers, cmb.features.max_bind_groups);

View File

@@ -0,0 +1,475 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
hub::{AllIdentityFilter, GfxBackend, Global, Token},
id,
resource,
track::TrackerSet,
FastHashMap,
Stored,
RefCount,
SubmissionIndex,
};
use copyless::VecHelper as _;
use hal::device::Device as _;
use parking_lot::Mutex;
use rendy_descriptor::{DescriptorAllocator, DescriptorSet};
use rendy_memory::{Heaps, MemoryBlock};
use std::{
sync::atomic::Ordering,
};
const CLEANUP_WAIT_MS: u64 = 5000;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
pub struct SuspectedResources {
pub(crate) buffers: Vec<id::BufferId>,
pub(crate) textures: Vec<id::TextureId>,
pub(crate) texture_views: Vec<id::TextureViewId>,
pub(crate) samplers: Vec<id::SamplerId>,
pub(crate) bind_groups: Vec<id::BindGroupId>,
}
impl SuspectedResources {
pub fn clear(&mut self) {
self.buffers.clear();
self.textures.clear();
self.texture_views.clear();
self.samplers.clear();
self.bind_groups.clear();
}
pub fn extend(&mut self, other: &Self) {
self.buffers.extend_from_slice(&other.buffers);
self.textures.extend_from_slice(&other.textures);
self.texture_views.extend_from_slice(&other.texture_views);
self.samplers.extend_from_slice(&other.samplers);
self.bind_groups.extend_from_slice(&other.bind_groups);
}
}
/// A struct that keeps lists of resources that are no longer needed.
#[derive(Debug)]
struct NonReferencedResources<B: hal::Backend> {
buffers: Vec<(B::Buffer, MemoryBlock<B>)>,
images: Vec<(B::Image, MemoryBlock<B>)>,
// Note: we keep the associated ID here in order to be able to check
// at any point what resources are used in a submission.
image_views: Vec<(id::TextureViewId, B::ImageView)>,
samplers: Vec<B::Sampler>,
framebuffers: Vec<B::Framebuffer>,
desc_sets: Vec<DescriptorSet<B>>,
}
impl<B: hal::Backend> NonReferencedResources<B> {
fn new() -> Self {
NonReferencedResources {
buffers: Vec::new(),
images: Vec::new(),
image_views: Vec::new(),
samplers: Vec::new(),
framebuffers: Vec::new(),
desc_sets: Vec::new(),
}
}
fn extend(&mut self, other: Self) {
self.buffers.extend(other.buffers);
self.images.extend(other.images);
self.image_views.extend(other.image_views);
self.samplers.extend(other.samplers);
self.framebuffers.extend(other.framebuffers);
self.desc_sets.extend(other.desc_sets);
}
unsafe fn clean(
&mut self,
device: &B::Device,
heaps_mutex: &Mutex<Heaps<B>>,
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
) {
if !self.buffers.is_empty() {
let mut heaps = heaps_mutex.lock();
for (raw, memory) in self.buffers.drain(..) {
device.destroy_buffer(raw);
heaps.free(device, memory);
}
}
if !self.images.is_empty() {
let mut heaps = heaps_mutex.lock();
for (raw, memory) in self.images.drain(..) {
device.destroy_image(raw);
heaps.free(device, memory);
}
}
for (_, raw) in self.image_views.drain(..) {
device.destroy_image_view(raw);
}
for raw in self.samplers.drain(..) {
device.destroy_sampler(raw);
}
for raw in self.framebuffers.drain(..) {
device.destroy_framebuffer(raw);
}
if !self.desc_sets.is_empty() {
descriptor_allocator_mutex
.lock()
.free(self.desc_sets.drain(..));
}
}
}
#[derive(Debug)]
struct ActiveSubmission<B: hal::Backend> {
index: SubmissionIndex,
fence: B::Fence,
last_resources: NonReferencedResources<B>,
mapped: Vec<id::BufferId>,
}
/// A struct responsible for tracking resource lifetimes.
///
/// Here is how host mapping is handled:
/// 1. When a mapping is requested, we add the buffer to the life tracker's list of `mapped` buffers.
/// 2. When `triage_suspected` is called, it checks the last submission index associated with each of the mapped buffers,
/// and registers the buffer either with a submission still in flight, or straight into the `ready_to_map` vector.
/// 3. When an `ActiveSubmission` is retired, the mapped buffers associated with it are moved to the `ready_to_map` vector.
/// 4. Finally, `handle_mapping` issues all the callbacks.
#[derive(Debug)]
pub struct LifetimeTracker<B: hal::Backend> {
/// Resources that the user has requested be mapped, but are still in use.
mapped: Vec<Stored<id::BufferId>>,
/// Resources that are suspected for destruction.
pub suspected_resources: SuspectedResources,
/// Resources that are not referenced anymore, but are still used by the GPU.
/// Grouped by submissions associated with a fence and a submission index.
/// The active submissions have to be stored in FIFO order: oldest come first.
active: Vec<ActiveSubmission<B>>,
/// Resources that are neither referenced nor used, just pending
/// actual deletion.
free_resources: NonReferencedResources<B>,
ready_to_map: Vec<id::BufferId>,
}
impl<B: GfxBackend> LifetimeTracker<B> {
pub fn new() -> Self {
LifetimeTracker {
mapped: Vec::new(),
suspected_resources: SuspectedResources::default(),
active: Vec::new(),
free_resources: NonReferencedResources::new(),
ready_to_map: Vec::new(),
}
}
pub fn track_submission(
&mut self,
index: SubmissionIndex,
fence: B::Fence,
new_suspects: &SuspectedResources,
) {
self.suspected_resources.extend(new_suspects);
self.active
.alloc()
.init(ActiveSubmission {
index,
fence,
last_resources: NonReferencedResources::new(),
mapped: Vec::new(),
});
}
pub fn map(&mut self, buffer: id::BufferId, ref_count: RefCount) {
self.mapped.push(Stored {
value: buffer,
ref_count,
});
}
/// Find the pending entry with the lowest active index. If none can be found, that means
/// everything in the allocator can be cleaned up, so std::usize::MAX is correct.
pub fn lowest_active_submission(&self) -> SubmissionIndex {
self.active
.iter()
.fold(std::usize::MAX, |v, active| active.index.min(v))
}
/// Returns the last submission index that is done.
fn check_last_done(
&mut self,
device: &B::Device,
force_wait: bool,
) -> SubmissionIndex {
if force_wait && !self.active.is_empty() {
let status = unsafe {
device.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
CLEANUP_WAIT_MS * 1_000_000,
)
};
assert_eq!(status, Ok(true), "GPU got stuck :(");
}
//TODO: enable when `is_sorted_by_key` is stable
//debug_assert!(self.active.is_sorted_by_key(|a| a.index));
let done_count = self
.active
.iter()
.position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap() })
.unwrap_or(self.active.len());
let last_done = if done_count != 0 {
self.active[done_count - 1].index
} else {
return 0;
};
for a in self.active.drain(.. done_count) {
log::trace!("Active submission {} is done", a.index);
self.free_resources.extend(a.last_resources);
self.ready_to_map.extend(a.mapped);
unsafe {
device.destroy_fence(a.fence);
}
}
last_done
}
pub fn cleanup(
&mut self,
device: &B::Device,
force_wait: bool,
heaps_mutex: &Mutex<Heaps<B>>,
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
) -> SubmissionIndex {
let last_done = self.check_last_done(device, force_wait);
unsafe {
self.free_resources.clean(
device,
heaps_mutex,
descriptor_allocator_mutex,
);
}
last_done
}
pub(crate) fn triage_suspected<F: AllIdentityFilter>(
&mut self,
global: &Global<F>,
trackers: &Mutex<TrackerSet>,
token: &mut Token<super::Device<B>>,
) {
let hub = B::hub(global);
if !self.suspected_resources.bind_groups.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.bind_groups.write(token);
for id in self.suspected_resources.bind_groups.drain(..) {
if trackers.bind_groups.remove_abandoned(id) {
hub.bind_groups.identity.free(id);
let res = guard.remove(id).unwrap();
assert!(res.used.bind_groups.is_empty());
self.suspected_resources.buffers.extend(res.used.buffers.used());
self.suspected_resources.textures.extend(res.used.textures.used());
self.suspected_resources.texture_views.extend(res.used.views.used());
self.suspected_resources.samplers.extend(res.used.samplers.used());
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.desc_sets.push(res.raw);
}
}
}
if !self.suspected_resources.texture_views.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.texture_views.write(token);
for id in self.suspected_resources.texture_views.drain(..) {
if trackers.views.remove_abandoned(id) {
hub.texture_views.identity.free(id);
let res = guard.remove(id).unwrap();
let raw = match res.inner {
resource::TextureViewInner::Native { raw, source_id } => {
self.suspected_resources.textures.push(source_id.value);
raw
}
resource::TextureViewInner::SwapChain { .. } => unreachable!(),
};
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.image_views.push((id, raw));
}
}
}
if !self.suspected_resources.textures.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.textures.write(token);
for id in self.suspected_resources.textures.drain(..) {
if trackers.textures.remove_abandoned(id) {
hub.textures.identity.free(id);
let res = guard.remove(id).unwrap();
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.images.push((res.raw, res.memory));
}
}
}
if !self.suspected_resources.samplers.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.samplers.write(token);
for id in self.suspected_resources.samplers.drain(..) {
if trackers.samplers.remove_abandoned(id) {
hub.samplers.identity.free(id);
let res = guard.remove(id).unwrap();
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.samplers.push(res.raw);
}
}
}
if !self.suspected_resources.buffers.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.buffers.write(token);
for id in self.suspected_resources.buffers.drain(..) {
if trackers.buffers.remove_abandoned(id) {
hub.buffers.identity.free(id);
let res = guard.remove(id).unwrap();
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.buffers.push((res.raw, res.memory));
}
}
}
}
pub(crate) fn triage_mapped<F>(
&mut self, global: &Global<F>, token: &mut Token<super::Device<B>>
) {
if self.mapped.is_empty() {
return;
}
let (buffer_guard, _) = B::hub(global).buffers.read(token);
for stored in self.mapped.drain(..) {
let resource_id = stored.value;
let buf = &buffer_guard[resource_id];
let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
log::trace!(
"Mapping of {:?} at submission {:?} gets assigned to active {:?}",
resource_id,
submit_index,
self.active.iter().position(|a| a.index == submit_index)
);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.ready_to_map, |a| &mut a.mapped)
.push(resource_id);
}
}
pub(crate) fn triage_framebuffers<F>(
&mut self,
global: &Global<F>,
framebuffers: &mut FastHashMap<super::FramebufferKey, B::Framebuffer>,
token: &mut Token<super::Device<B>>,
) {
let (texture_view_guard, _) = B::hub(global).texture_views.read(token);
let remove_list = framebuffers
.keys()
.filter_map(|key| {
let mut last_submit: SubmissionIndex = 0;
for &at in key.all() {
if texture_view_guard.contains(at) {
return None;
}
// This attachment is no longer registered.
// Let's see if it's used by any of the active submissions.
for a in &self.active {
if a.last_resources.image_views.iter().any(|&(id, _)| id == at) {
last_submit = last_submit.max(a.index);
}
}
}
Some((key.clone(), last_submit))
})
.collect::<FastHashMap<_, _>>();
for (ref key, submit_index) in remove_list {
let framebuffer = framebuffers.remove(key).unwrap();
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.framebuffers.push(framebuffer);
}
}
pub(crate) fn handle_mapping<F>(
&mut self,
global: &Global<F>,
raw: &B::Device,
token: &mut Token<super::Device<B>>,
) -> Vec<super::BufferMapPendingCallback> {
if self.ready_to_map.is_empty() {
return Vec::new();
}
let (mut buffer_guard, _) = B::hub(global).buffers.write(token);
self.ready_to_map
.drain(..)
.map(|buffer_id| {
let buffer = &mut buffer_guard[buffer_id];
let mapping = buffer.pending_mapping.take().unwrap();
let result = match mapping.op {
resource::BufferMapOperation::Read(..) => {
super::map_buffer(raw, buffer, mapping.range, super::HostMap::Read)
}
resource::BufferMapOperation::Write(..) => {
super::map_buffer(raw, buffer, mapping.range, super::HostMap::Write)
}
};
(mapping.op, result)
})
.collect()
}
}

File diff suppressed because it is too large

View File

@@ -88,7 +88,7 @@ impl Drop for RefCount {
#[derive(Debug)]
struct LifeGuard {
ref_count: RefCount,
ref_count: Option<RefCount>,
submission_index: AtomicUsize,
}
@@ -96,10 +96,21 @@ impl LifeGuard {
fn new() -> Self {
let bx = Box::new(AtomicUsize::new(1));
LifeGuard {
ref_count: RefCount(ptr::NonNull::new(Box::into_raw(bx)).unwrap()),
ref_count: ptr::NonNull::new(Box::into_raw(bx)).map(RefCount),
submission_index: AtomicUsize::new(0),
}
}
fn add_ref(&self) -> RefCount {
self.ref_count.clone().unwrap()
}
/// Returns `true` if the resource is still needed by the user.
fn use_at(&self, submit_index: SubmissionIndex) -> bool {
self.submission_index
.store(submit_index, Ordering::Release);
self.ref_count.is_some()
}
}
#[derive(Clone, Debug)]

View File

@@ -66,8 +66,8 @@ pub enum BufferMapAsyncStatus {
}
pub enum BufferMapOperation {
Read(std::ops::Range<u64>, Box<dyn FnOnce(BufferMapAsyncStatus, *const u8)>),
Write(std::ops::Range<u64>, Box<dyn FnOnce(BufferMapAsyncStatus, *mut u8)>),
Read(Box<dyn FnOnce(BufferMapAsyncStatus, *const u8)>),
Write(Box<dyn FnOnce(BufferMapAsyncStatus, *mut u8)>),
}
//TODO: clarify if/why this is needed here
@@ -76,22 +76,22 @@ unsafe impl Sync for BufferMapOperation {}
impl fmt::Debug for BufferMapOperation {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let (op, range) = match *self {
BufferMapOperation::Read(ref range, _) => ("read", range),
BufferMapOperation::Write(ref range, _) => ("write", range),
let op = match *self {
BufferMapOperation::Read(_) => "read",
BufferMapOperation::Write(_) => "write",
};
write!(fmt, "BufferMapOperation <{}> of range {:?}", op, range)
write!(fmt, "BufferMapOperation <{}>", op)
}
}
impl BufferMapOperation {
pub(crate) fn call_error(self) {
match self {
BufferMapOperation::Read(_, callback) => {
BufferMapOperation::Read(callback) => {
log::error!("wgpu_buffer_map_read_async failed: buffer mapping is pending");
callback(BufferMapAsyncStatus::Error, std::ptr::null());
}
BufferMapOperation::Write(_, callback) => {
BufferMapOperation::Write(callback) => {
log::error!("wgpu_buffer_map_write_async failed: buffer mapping is pending");
callback(BufferMapAsyncStatus::Error, std::ptr::null_mut());
}
@@ -99,6 +99,14 @@ impl BufferMapOperation {
}
}
#[derive(Debug)]
pub struct BufferPendingMapping {
pub range: std::ops::Range<BufferAddress>,
pub op: BufferMapOperation,
// hold the parent alive while the mapping is active
pub parent_ref_count: RefCount,
}
#[derive(Debug)]
pub struct Buffer<B: hal::Backend> {
pub(crate) raw: B::Buffer,
@@ -107,14 +115,14 @@ pub struct Buffer<B: hal::Backend> {
pub(crate) memory: MemoryBlock<B>,
pub(crate) size: BufferAddress,
pub(crate) full_range: (),
pub(crate) mapped_write_ranges: Vec<std::ops::Range<u64>>,
pub(crate) pending_map_operation: Option<BufferMapOperation>,
pub(crate) mapped_write_ranges: Vec<std::ops::Range<BufferAddress>>,
pub(crate) pending_mapping: Option<BufferPendingMapping>,
pub(crate) life_guard: LifeGuard,
}
impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
self.life_guard.ref_count.as_ref().unwrap()
}
}
@@ -242,7 +250,7 @@ pub struct Texture<B: hal::Backend> {
impl<B: hal::Backend> Borrow<RefCount> for Texture<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
self.life_guard.ref_count.as_ref().unwrap()
}
}
@@ -315,7 +323,7 @@ pub struct TextureView<B: hal::Backend> {
impl<B: hal::Backend> Borrow<RefCount> for TextureView<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
self.life_guard.ref_count.as_ref().unwrap()
}
}
@@ -397,7 +405,7 @@ pub struct Sampler<B: hal::Backend> {
impl<B: hal::Backend> Borrow<RefCount> for Sampler<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
self.life_guard.ref_count.as_ref().unwrap()
}
}

View File

@@ -167,7 +167,7 @@ impl<F: IdentityFilter<TextureViewId>> Global<F> {
image,
source_id: Stored {
value: swap_chain_id,
ref_count: sc.life_guard.ref_count.clone(),
ref_count: sc.life_guard.add_ref(),
},
framebuffers: SmallVec::new(),
},
@@ -185,7 +185,7 @@
},
life_guard: LifeGuard::new(),
};
let ref_count = view.life_guard.ref_count.clone();
let ref_count = view.life_guard.add_ref();
let view_id = hub
.texture_views
.register_identity(view_id_in, view, &mut token);

View File

@@ -192,6 +192,20 @@ impl<S: ResourceState> ResourceTracker<S> {
}
}
/// Removes the resource from the tracker if we are holding the last reference.
pub fn remove_abandoned(&mut self, id: S::Id) -> bool {
let (index, epoch, backend) = id.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.entry(index) {
Entry::Occupied(e) if e.get().ref_count.load() == 1 => {
let res = e.remove();
assert_eq!(res.epoch, epoch);
true
}
_ => false,
}
}
/// Try to optimize the internal representation.
pub fn optimize(&mut self) {
for resource in self.map.values_mut() {
@@ -212,6 +226,11 @@ impl<S: ResourceState> ResourceTracker<S> {
self.map.clear();
}
/// Returns true if the tracker is empty.
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Initialize a resource to be used.
///
/// Returns `false` if the resource is already tracked.

View File

@@ -284,6 +284,16 @@ pub extern "C" fn wgpu_device_create_command_encoder(
gfx_select!(device_id => GLOBAL.device_create_command_encoder(device_id, desc, PhantomData))
}
#[no_mangle]
pub extern "C" fn wgpu_command_encoder_destroy(command_encoder_id: id::CommandEncoderId) {
gfx_select!(command_encoder_id => GLOBAL.command_encoder_destroy(command_encoder_id))
}
#[no_mangle]
pub extern "C" fn wgpu_command_buffer_destroy(command_buffer_id: id::CommandBufferId) {
gfx_select!(command_buffer_id => GLOBAL.command_buffer_destroy(command_buffer_id))
}
#[no_mangle]
pub extern "C" fn wgpu_device_get_queue(device_id: id::DeviceId) -> id::QueueId {
device_id
@@ -344,12 +354,11 @@ pub extern "C" fn wgpu_buffer_map_read_async(
userdata: *mut u8,
) {
let operation = core::resource::BufferMapOperation::Read(
start .. start + size,
Box::new(move |status, data| unsafe {
callback(status, data, userdata)
}),
);
gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, core::resource::BufferUsage::MAP_READ, operation))
gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, core::resource::BufferUsage::MAP_READ, start .. start + size, operation))
}
#[no_mangle]
@@ -361,12 +370,11 @@ pub extern "C" fn wgpu_buffer_map_write_async(
userdata: *mut u8,
) {
let operation = core::resource::BufferMapOperation::Write(
start .. start + size,
Box::new(move |status, data| unsafe {
callback(status, data, userdata)
}),
);
gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, core::resource::BufferUsage::MAP_WRITE, operation))
gfx_select!(buffer_id => GLOBAL.buffer_map_async(buffer_id, core::resource::BufferUsage::MAP_WRITE, start .. start + size, operation))
}
#[no_mangle]