mirror of https://github.com/gfx-rs/wgpu.git
synced 2024-11-21 22:33:49 +00:00
Fix command buffer recycling
This commit is contained in:
parent c868db2cc7
commit 2221b112c7
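The diff below drops the per-command-buffer fences in the allocator and instead recycles by submission index: each command buffer stores the index of the submission it was last used in, and `CommandAllocator::maintain` recycles every pending buffer whose index is at or below the last submission the device has finished (the value returned by `DestroyedResources::cleanup`). What follows first is a minimal, self-contained sketch of that scheme for orientation; the names here (`Allocator`, `FakeCmdBuf`) are illustrative stand-ins, not wgpu's actual types.

use std::collections::VecDeque;

type SubmissionIndex = usize;

// Illustrative stand-in for a command buffer; only the field the
// recycling logic needs is modeled.
struct FakeCmdBuf {
    submission_index: SubmissionIndex,
}

struct Allocator {
    pending: Vec<FakeCmdBuf>,        // submitted, completion not yet observed
    available: VecDeque<FakeCmdBuf>, // finished and ready for reuse
}

impl Allocator {
    fn new() -> Self {
        Allocator { pending: Vec::new(), available: VecDeque::new() }
    }

    // At submit time the buffer is only remembered; no per-buffer fence is created.
    fn submit(&mut self, cmd_buf: FakeCmdBuf) {
        self.pending.push(cmd_buf);
    }

    // `last_done` is the highest submission index known to have completed.
    fn maintain(&mut self, last_done: SubmissionIndex) {
        for i in (0..self.pending.len()).rev() {
            if self.pending[i].submission_index <= last_done {
                let cmd_buf = self.pending.swap_remove(i);
                self.available.push_back(cmd_buf); // recycle for reuse
            }
        }
    }
}

fn main() {
    let mut alloc = Allocator::new();
    alloc.submit(FakeCmdBuf { submission_index: 1 });
    alloc.submit(FakeCmdBuf { submission_index: 2 });
    alloc.maintain(1); // only the buffer from submission 1 is recycled
    assert_eq!(alloc.pending.len(), 1);
    assert_eq!(alloc.available.len(), 1);
}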
@@ -55,7 +55,7 @@ fn main() {
 
     #[cfg(feature = "winit")]
     {
-        use wgpu_native::winit::{ControlFlow, Event, EventsLoop, Window, WindowEvent};
+        use wgpu_native::winit::{ControlFlow, Event, ElementState, EventsLoop, KeyboardInput, Window, WindowEvent, VirtualKeyCode};
 
         let mut events_loop = EventsLoop::new();
         let window = Window::new(&events_loop).unwrap();
@@ -74,8 +74,20 @@ fn main() {
 
         events_loop.run_forever(|event| {
             match event {
-                Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => {
-                    return ControlFlow::Break
-                }
+                Event::WindowEvent { event, .. } => match event {
+                    WindowEvent::KeyboardInput {
+                        input: KeyboardInput { virtual_keycode: Some(code), state: ElementState::Pressed, .. },
+                        ..
+                    } => match code {
+                        VirtualKeyCode::Escape => {
+                            return ControlFlow::Break
+                        }
+                        _ => {}
+                    }
+                    WindowEvent::CloseRequested => {
+                        return ControlFlow::Break
+                    }
+                    _ => {}
+                }
                 _ => {}
             }
@@ -100,7 +112,6 @@ fn main() {
                 .submit(&[cmd_buf]);
 
             swap_chain.present();
 
             ControlFlow::Continue
         });
     }
@@ -1,14 +1,14 @@
 use super::CommandBuffer;
 use crate::track::Tracker;
-use crate::{DeviceId, LifeGuard, Stored};
+use crate::{DeviceId, LifeGuard, Stored, SubmissionIndex};
 
 use hal::command::RawCommandBuffer;
 use hal::pool::RawCommandPool;
 use hal::Device;
+use parking_lot::Mutex;
 
 use std::collections::HashMap;
-//TODO: use `parking_lot::Mutex`?
-use std::sync::Mutex;
 use std::sync::atomic::Ordering;
 use std::thread;
 
 struct CommandPool<B: hal::Backend> {
@@ -29,7 +29,6 @@ impl<B: hal::Backend> CommandPool<B> {
 
 struct Inner<B: hal::Backend> {
     pools: HashMap<thread::ThreadId, CommandPool<B>>,
-    fences: Vec<B::Fence>,
     pending: Vec<CommandBuffer<B>>,
 }
 
@@ -42,7 +41,6 @@ impl<B: hal::Backend> Inner<B> {
             }
             pool.available.push(raw);
         }
-        self.fences.push(cmd_buf.fence);
     }
 }
 
@@ -57,7 +55,6 @@ impl<B: hal::Backend> CommandAllocator<B> {
             queue_family,
             inner: Mutex::new(Inner {
                 pools: HashMap::new(),
-                fences: Vec::new(),
                 pending: Vec::new(),
             }),
         }
@@ -69,15 +66,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
         device: &B::Device,
     ) -> CommandBuffer<B> {
         let thread_id = thread::current().id();
-        let mut inner = self.inner.lock().unwrap();
-
-        let fence = match inner.fences.pop() {
-            Some(fence) => {
-                unsafe { device.reset_fence(&fence).unwrap() };
-                fence
-            }
-            None => device.create_fence(false).unwrap(),
-        };
+        let mut inner = self.inner.lock();
 
         let pool = inner.pools.entry(thread_id).or_insert_with(|| CommandPool {
             raw: unsafe {
@@ -93,7 +82,6 @@ impl<B: hal::Backend> CommandAllocator<B> {
 
         CommandBuffer {
             raw: vec![init],
-            fence,
             recorded_thread_id: thread_id,
             device_id,
             life_guard: LifeGuard::new(),
@@ -104,7 +92,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
     }
 
     pub fn extend(&self, cmd_buf: &CommandBuffer<B>) -> B::CommandBuffer {
-        let mut inner = self.inner.lock().unwrap();
+        let mut inner = self.inner.lock();
         let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap();
 
         if pool.available.is_empty() {
@@ -116,17 +104,18 @@ impl<B: hal::Backend> CommandAllocator<B> {
     }
 
     pub fn submit(&self, cmd_buf: CommandBuffer<B>) {
-        self.inner.lock().unwrap().pending.push(cmd_buf);
+        self.inner.lock().pending.push(cmd_buf);
     }
 
     pub fn recycle(&self, cmd_buf: CommandBuffer<B>) {
-        self.inner.lock().unwrap().recycle(cmd_buf);
+        self.inner.lock().recycle(cmd_buf);
    }
 
-    pub fn maintain(&self, device: &B::Device) {
-        let mut inner = self.inner.lock().unwrap();
+    pub fn maintain(&self, device: &B::Device, last_done: SubmissionIndex) {
+        let mut inner = self.inner.lock();
         for i in (0..inner.pending.len()).rev() {
-            if unsafe { device.get_fence_status(&inner.pending[i].fence).unwrap() } {
+            let index = inner.pending[i].life_guard.submission_index.load(Ordering::Acquire);
+            if index <= last_done {
                 let cmd_buf = inner.pending.swap_remove(i);
                 inner.recycle(cmd_buf);
             }
@@ -12,8 +12,11 @@ use crate::swap_chain::{SwapChainLink, SwapImageEpoch};
 use crate::track::{BufferTracker, TextureTracker};
 use crate::{conv, resource};
 use crate::{
-    BufferId, BufferUsageFlags, Color, CommandBufferId, ComputePassId, DeviceId, LifeGuard,
-    Origin3d, RenderPassId, Stored, TextureId, TextureUsageFlags, TextureViewId, WeaklyStored, B,
+    BufferId, CommandBufferId, ComputePassId, DeviceId,
+    RenderPassId, TextureId, TextureViewId,
+    BufferUsageFlags, TextureUsageFlags, Color, Origin3d,
+    LifeGuard, Stored, WeaklyStored,
+    B,
 };
 
 use hal::command::RawCommandBuffer;
@@ -82,10 +85,9 @@ pub struct TextureCopyView {
 
 pub struct CommandBuffer<B: hal::Backend> {
     pub(crate) raw: Vec<B::CommandBuffer>,
-    fence: B::Fence,
     recorded_thread_id: ThreadId,
     device_id: Stored<DeviceId>,
-    life_guard: LifeGuard,
+    pub(crate) life_guard: LifeGuard,
     pub(crate) buffer_tracker: BufferTracker,
     pub(crate) texture_tracker: TextureTracker,
     pub(crate) swap_chain_links: Vec<SwapChainLink<SwapImageEpoch>>,
@@ -105,12 +105,16 @@ impl<B: hal::Backend> DestroyedResources<B> {
         }
     }
 
-    fn cleanup(&mut self, raw: &B::Device) {
+    /// Returns the last submission index that is done.
+    fn cleanup(&mut self, raw: &B::Device) -> SubmissionIndex {
+        let mut last_done = 0;
+
         for i in (0..self.active.len()).rev() {
             if unsafe {
                 raw.get_fence_status(&self.active[i].fence).unwrap()
             } {
                 let a = self.active.swap_remove(i);
+                last_done = last_done.max(a.index);
                 self.free.extend(a.resources);
                 unsafe {
                     raw.destroy_fence(a.fence);
@@ -128,6 +132,8 @@ impl<B: hal::Backend> DestroyedResources<B> {
                 }
             }
         }
+
+        last_done
     }
 }
 
@ -539,7 +545,6 @@ pub extern "C" fn wgpu_queue_submit(
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let mut swap_chain_links = Vec::new();
|
||||
device.com_allocator.maintain(&device.raw);
|
||||
|
||||
//TODO: if multiple command buffers are submitted, we can re-use the last
|
||||
// native command buffer of the previous chain instead of always creating
|
||||
@ -550,6 +555,8 @@ pub extern "C" fn wgpu_queue_submit(
|
||||
let comb = command_buffer_guard.get_mut(cmb_id);
|
||||
swap_chain_links.extend(comb.swap_chain_links.drain(..));
|
||||
// update submission IDs
|
||||
comb.life_guard.submission_index
|
||||
.store(old_submit_index, Ordering::Release);
|
||||
for id in comb.buffer_tracker.used() {
|
||||
buffer_guard
|
||||
.get(id)
|
||||
@ -620,17 +627,21 @@ pub extern "C" fn wgpu_queue_submit(
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let last_done = {
|
||||
let mut destroyed = device.destroyed.lock();
|
||||
destroyed.triage_referenced(&mut *buffer_guard, &mut *texture_guard);
|
||||
destroyed.cleanup(&device.raw);
|
||||
let last_done = destroyed.cleanup(&device.raw);
|
||||
|
||||
destroyed.active.push(ActiveSubmission {
|
||||
index: old_submit_index + 1,
|
||||
fence,
|
||||
resources: Vec::new(),
|
||||
});
|
||||
}
|
||||
|
||||
last_done
|
||||
};
|
||||
|
||||
device.com_allocator.maintain(&device.raw, last_done);
|
||||
|
||||
// finally, return the command buffers to the allocator
|
||||
for &cmb_id in command_buffer_ids {
|
||||
@ -864,15 +875,15 @@ pub extern "C" fn wgpu_device_create_swap_chain(
|
||||
) -> SwapChainId {
|
||||
let device_guard = HUB.devices.read();
|
||||
let device = device_guard.get(device_id);
|
||||
let adapter_guard = HUB.adapters.read();
|
||||
let physical_device = &adapter_guard.get(device.adapter_id.0).physical_device;
|
||||
let mut surface_guard = HUB.surfaces.write();
|
||||
let surface = surface_guard.get_mut(surface_id);
|
||||
let mut texture_guard = HUB.textures.write();
|
||||
let mut texture_view_guard = HUB.texture_views.write();
|
||||
let mut swap_chain_guard = HUB.swap_chains.write();
|
||||
|
||||
let (caps, formats, _present_modes, _composite_alphas) = surface.raw.compatibility(physical_device);
|
||||
let (caps, formats, _present_modes, _composite_alphas) = {
|
||||
let adapter_guard = HUB.adapters.read();
|
||||
let adapter = adapter_guard.get(device.adapter_id.0);
|
||||
assert!(surface.raw.supports_queue_family(&adapter.queue_families[0]));
|
||||
surface.raw.compatibility(&adapter.physical_device)
|
||||
};
|
||||
let num_frames = caps.image_count.start; //TODO: configure?
|
||||
let frame_format = conv::map_texture_format(desc.format);
|
||||
let config = hal::SwapchainConfig::new(
|
||||
@ -912,6 +923,8 @@ pub extern "C" fn wgpu_device_create_swap_chain(
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let mut swap_chain_guard = HUB.swap_chains.write();
|
||||
|
||||
let swap_chain_id = swap_chain_guard
|
||||
.register(swap_chain::SwapChain {
|
||||
raw,
|
||||
@ -931,6 +944,9 @@ pub extern "C" fn wgpu_device_create_swap_chain(
|
||||
hal::Backbuffer::Framebuffer(_) => panic!("Deprecated API detected!"),
|
||||
};
|
||||
|
||||
let mut texture_guard = HUB.textures.write();
|
||||
let mut texture_view_guard = HUB.texture_views.write();
|
||||
|
||||
for (i, image) in images.into_iter().enumerate() {
|
||||
let kind = hal::image::Kind::D2(desc.width, desc.height, 1, 1);
|
||||
let full_range = hal::image::SubresourceRange {
|
||||
|