Use current active list instead of storing last done

This commit is contained in:
Layl 2020-01-02 01:24:02 +01:00
parent b55cd162bf
commit ac8f75288f
2 changed files with 15 additions and 23 deletions

View File

@ -26,18 +26,18 @@ struct CommandPool<B: hal::Backend> {
}
impl<B: hal::Backend> CommandPool<B> {
-fn maintain(&mut self, last_done: usize) {
+fn maintain(&mut self, lowest_active_index: SubmissionIndex) {
for i in (0 .. self.pending.len()).rev() {
let index = self.pending[i]
.life_guard
.submission_index
.load(Ordering::Acquire);
-if index <= last_done {
+if index < lowest_active_index {
let cmd_buf = self.pending.swap_remove(i);
log::trace!(
"recycling comb submitted in {} when {} is done",
index,
-last_done
+lowest_active_index,
);
self.recycle(cmd_buf);
}
@ -82,7 +82,7 @@ impl<B: GfxBackend> CommandAllocator<B> {
device_id: Stored<DeviceId>,
device: &B::Device,
features: Features,
-last_done: usize,
+lowest_active_index: SubmissionIndex,
) -> CommandBuffer<B> {
//debug_assert_eq!(device_id.backend(), B::VARIANT);
let thread_id = thread::current().id();
@ -101,7 +101,7 @@ impl<B: GfxBackend> CommandAllocator<B> {
});
// Recycle completed command buffers
-pool.maintain(last_done);
+pool.maintain(lowest_active_index);
let init = pool.allocate();

View File

@ -200,7 +200,7 @@ impl<B: GfxBackend> PendingResources<B> {
heaps_mutex: &Mutex<Heaps<B>>,
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
force_wait: bool,
-) -> SubmissionIndex {
+) {
if force_wait && !self.active.is_empty() {
let status = unsafe {
device.wait_for_fences(
@ -219,11 +219,6 @@ impl<B: GfxBackend> PendingResources<B> {
.iter()
.position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap() })
.unwrap_or(self.active.len());
-let last_done = if done_count != 0 {
-self.active[done_count - 1].index
-} else {
-return 0;
-};
for a in self.active.drain(.. done_count) {
log::trace!("Active submission {} is done", a.index);
@ -260,8 +255,6 @@ impl<B: GfxBackend> PendingResources<B> {
},
}
}
-last_done
}
fn triage_referenced<F: AllIdentityFilter>(
@ -516,9 +509,6 @@ pub struct Device<B: hal::Backend> {
pub(crate) framebuffers: Mutex<FastHashMap<FramebufferKey, B::Framebuffer>>,
pending: Mutex<PendingResources<B>>,
pub(crate) features: Features,
-/// The last submission index that is done, used to reclaim command buffers on encoder creation.
-/// Because of AtomicUsize not having fetch_max stabilized, this has to be a mutex right now.
-last_done: Mutex<usize>,
}
impl<B: GfxBackend> Device<B> {
@ -578,7 +568,6 @@ impl<B: GfxBackend> Device<B> {
max_bind_groups,
supports_texture_d24_s8,
},
-last_done: Mutex::new(0),
}
}
@ -594,16 +583,12 @@ impl<B: GfxBackend> Device<B> {
pending.triage_referenced(global, &mut *trackers, token);
pending.triage_mapped(global, token);
pending.triage_framebuffers(global, &mut *self.framebuffers.lock(), token);
-let last_done = pending.cleanup(
+pending.cleanup(
&self.raw,
&self.mem_allocator,
&self.desc_allocator,
force_wait,
);
-{
-let mut last_done_guard = self.last_done.lock();
-*last_done_guard = last_done_guard.max(last_done);
-}
let callbacks = pending.handle_mapping(global, &self.raw, token);
unsafe {
@ -1564,9 +1549,16 @@ impl<F: IdentityFilter<CommandEncoderId>> Global<F> {
value: device_id,
ref_count: device.life_guard.ref_count.clone(),
};
+// The first entry in the active list should have the lowest index
+let lowest_active_index = device.pending.lock()
+.active.get(0)
+.map(|active| active.index)
+.unwrap_or(0);
let mut comb = device
.com_allocator
-.allocate(dev_stored, &device.raw, device.features, *device.last_done.lock());
+.allocate(dev_stored, &device.raw, device.features, lowest_active_index);
unsafe {
comb.raw.last_mut().unwrap().begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,