Merge pull request #471 from tomaka/fut-fence-same-queue

Change futures to chain futures between frames

Repository: https://github.com/vulkano-rs/vulkano.git
Commit: 3a525e5a75
@@ -164,12 +164,10 @@ fn main() {
         vulkano::framebuffer::Framebuffer::new(renderpass.clone(), dimensions, attachments).unwrap()
     }).collect::<Vec<_>>();
 
-    let mut submissions: Vec<Box<GpuFuture>> = Vec::new();
+    let mut previous_frame_end = Box::new(vulkano::sync::now(device.clone())) as Box<GpuFuture>;
 
     loop {
-        while submissions.len() >= 4 {
-            submissions.remove(0);
-        }
+        previous_frame_end.cleanup_finished();
 
         let (image_num, future) = swapchain.acquire_next_image(Duration::new(10, 0)).unwrap();
 
@@ -187,11 +185,11 @@ fn main() {
             .end_render_pass().unwrap()
             .build().unwrap();
 
-        let future = future
+        let future = previous_frame_end.join(future)
             .then_execute(queue.clone(), cb).unwrap()
             .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
             .then_signal_fence_and_flush().unwrap();
-        submissions.push(Box::new(future) as Box<_>);
+        previous_frame_end = Box::new(future) as Box<_>;
 
         let mut done = false;
         events_loop.poll_events(|ev| {
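Taken together, the two hunks above turn the frame loop into the following shape. This is a minimal sketch, not the full example: `device`, `queue` and `swapchain` are assumed to already exist, and `build_command_buffer` is a hypothetical stand-in for the per-frame command buffer construction that the example performs.

    use std::time::Duration;
    use vulkano::sync::GpuFuture;

    // Start with a no-op future; it is replaced by the real submission at the end of
    // every frame, so each frame is chained onto the previous one.
    let mut previous_frame_end = Box::new(vulkano::sync::now(device.clone())) as Box<GpuFuture>;

    loop {
        // Poll the fences of earlier frames and free the resources the GPU is done with.
        previous_frame_end.cleanup_finished();

        // Block (with a timeout) until the swapchain hands us an image to draw into.
        let (image_num, acquire_future) = swapchain.acquire_next_image(Duration::new(10, 0)).unwrap();

        // Hypothetical helper: builds the command buffer for this frame, as in the example.
        let cb = build_command_buffer(image_num);

        // Wait for both the previous frame and the acquired image, execute the command
        // buffer, present the result, then signal a fence and flush everything to the GPU.
        let future = previous_frame_end.join(acquire_future)
            .then_execute(queue.clone(), cb).unwrap()
            .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
            .then_signal_fence_and_flush().unwrap();

        // The fence future becomes the "previous frame" of the next iteration.
        previous_frame_end = Box::new(future) as Box<GpuFuture>;
    }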
@@ -161,12 +161,10 @@ fn main() {
     }).collect::<Vec<_>>();
 
 
-    let mut submissions: Vec<Box<GpuFuture>> = Vec::new();
+    let mut previous_frame = Box::new(vulkano::sync::now(device.clone())) as Box<GpuFuture>;
 
     loop {
-        while submissions.len() >= 4 {
-            submissions.remove(0);
-        }
+        previous_frame.cleanup_finished();
 
         {
             // acquiring write lock for the uniform buffer
@@ -179,7 +177,7 @@ fn main() {
             buffer_content.world = cgmath::Matrix4::from(rotation).into();
         }
 
-        let (image_num, future) = swapchain.acquire_next_image(std::time::Duration::new(1, 0)).unwrap();
+        let (image_num, acquire_future) = swapchain.acquire_next_image(std::time::Duration::new(1, 0)).unwrap();
 
         let command_buffer = vulkano::command_buffer::AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
             .begin_render_pass(
@@ -193,11 +191,11 @@ fn main() {
             .end_render_pass().unwrap()
             .build().unwrap();
 
-        let future = future
+        let future = previous_frame.join(acquire_future)
             .then_execute(queue.clone(), command_buffer).unwrap()
             .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
             .then_signal_fence_and_flush().unwrap();
-        submissions.push(Box::new(future) as Box<_>);
+        previous_frame = Box::new(future) as Box<_>;
 
         let mut done = false;
         events_loop.poll_events(|ev| {
@@ -55,6 +55,7 @@ use vulkano::pipeline::viewport::Viewport;
 use vulkano::pipeline::viewport::Scissor;
 use vulkano::swapchain::SurfaceTransform;
 use vulkano::swapchain::Swapchain;
+use vulkano::sync::now;
 use vulkano::sync::GpuFuture;
 
 use std::sync::Arc;
@@ -348,15 +349,15 @@ fn main() {
     // they are in use by the GPU.
     //
     // Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
-    // that, we store them in a `Vec` and clean them from time to time.
-    let mut submissions: Vec<Box<GpuFuture>> = Vec::new();
+    // that, we store the submission of the previous frame here.
+    let mut previous_frame_end = Box::new(now(device.clone())) as Box<GpuFuture>;
 
     loop {
-        // Clearing the old submissions by keeping alive only the ones which probably aren't
-        // finished.
-        while submissions.len() >= 4 {
-            submissions.remove(0);
-        }
+        // It is important to call this function from time to time, otherwise resources will keep
+        // accumulating and you will eventually reach an out of memory error.
+        // Calling this function polls various fences in order to determine what the GPU has
+        // already processed, and frees the resources that are no longer needed.
+        previous_frame_end.cleanup_finished();
 
         // Before we can draw on the output, we have to *acquire* an image from the swapchain. If
         // no image is available (which happens if you submit draw commands too quickly), then the
@@ -365,7 +366,7 @@ fn main() {
         //
         // This function can block if no image is available. The parameter is a timeout after
         // which the function call will return an error.
-        let (image_num, future) = swapchain.acquire_next_image(Duration::new(1, 0)).unwrap();
+        let (image_num, acquire_future) = swapchain.acquire_next_image(Duration::new(1, 0)).unwrap();
 
         // In order to draw, we have to build a *command buffer*. The command buffer object holds
         // the list of commands that are going to be executed.
@@ -404,7 +405,7 @@ fn main() {
             // Finish building the command buffer by calling `build`.
             .build().unwrap();
 
-        let future = future
+        let future = previous_frame_end.join(acquire_future)
             .then_execute(queue.clone(), command_buffer).unwrap()
 
             // The color output is now expected to contain our triangle. But in order to show it on
@@ -415,7 +416,7 @@ fn main() {
             // the GPU has finished executing the command buffer that draws the triangle.
             .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
             .then_signal_fence_and_flush().unwrap();
-        submissions.push(Box::new(future) as Box<_>);
+        previous_frame_end = Box::new(future) as Box<_>;
 
         // Note that in more complex programs it is likely that one of `acquire_next_image`,
         // `command_buffer::submit`, or `present` will block for some time. This happens when the
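The example keeps the `unwrap()` on `then_signal_fence_and_flush()`. A slightly more defensive variant (not part of this commit, only a sketch built from the same calls) drops a failed chain and starts the next frame from a fresh `now()` future so the loop keeps running:

    let future = previous_frame_end.join(acquire_future)
        .then_execute(queue.clone(), command_buffer).unwrap()
        .then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
        .then_signal_fence_and_flush();

    previous_frame_end = match future {
        Ok(future) => Box::new(future) as Box<GpuFuture>,
        // Flushing failed; forget this frame and fall back to a no-op future.
        Err(_) => Box::new(now(device.clone())) as Box<GpuFuture>,
    };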
@@ -787,6 +787,8 @@ unsafe impl<I> CommandBuffer for SubmitSyncLayer<I> where I: CommandBuffer {
                 Err(err) => err
             };
 
+            // FIXME: this is bad because dropping the submit sync layer doesn't drop the
+            // attachments of the framebuffer, meaning that they will stay locked
             match (img.try_gpu_lock(entry.exclusive, queue), prev_err) {
                 (Ok(_), _) => (),
                 (Err(err), AccessCheckError::Unknown) => return Err(err.into()),
@@ -238,6 +238,8 @@ unsafe impl<F, A> ImageAccess for AttachmentImageAccess<F, A>
     #[inline]
     fn try_gpu_lock(&self, _: bool, _: &Queue) -> Result<(), AccessError> {
         // FIXME: uncomment when it's working
+        // the problem is in the submit sync layer which locks framebuffer attachments and
+        // keeps them locked even after destruction
         Ok(())
         /*if self.already_locked.swap(true, Ordering::SeqCst) == true {
             return false;
@@ -249,6 +251,8 @@ unsafe impl<F, A> ImageAccess for AttachmentImageAccess<F, A>
     #[inline]
     unsafe fn increase_gpu_lock(&self) {
         // FIXME: uncomment when it's working
+        // the problem is in the submit sync layer which locks framebuffer attachments and
+        // keeps them locked even after destruction
         /*debug_assert!(self.already_locked.load(Ordering::SeqCst));
         let val = self.img.gpu_lock.fetch_add(1, Ordering::SeqCst);
         debug_assert!(val >= 1);*/
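The commented-out code in both methods is a one-shot "try lock" built on an atomic swap: the first caller flips `already_locked` from `false` to `true` and wins, every later caller sees `true` and is rejected. A self-contained illustration of that pattern (toy types, not vulkano API):

    use std::sync::atomic::{AtomicBool, Ordering};

    struct ExclusiveLock {
        already_locked: AtomicBool,
    }

    impl ExclusiveLock {
        // `swap` stores `true` and returns the previous value atomically, so only the
        // first caller observes `false` and acquires the lock.
        fn try_lock(&self) -> bool {
            !self.already_locked.swap(true, Ordering::SeqCst)
        }

        fn unlock(&self) {
            self.already_locked.store(false, Ordering::SeqCst);
        }
    }

    fn main() {
        let lock = ExclusiveLock { already_locked: AtomicBool::new(false) };
        assert!(lock.try_lock());   // first caller acquires the lock
        assert!(!lock.try_lock());  // second caller is rejected
        lock.unlock();
    }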
@@ -339,6 +339,8 @@ impl Swapchain {
             queue: queue,
             swapchain: me,
             image_id: index as u32,
+            image: swapchain_image,
+            flushed: AtomicBool::new(false),
             finished: AtomicBool::new(false),
         }
     }
@@ -629,7 +631,13 @@ pub struct PresentFuture<P> where P: GpuFuture {
     previous: P,
     queue: Arc<Queue>,
     swapchain: Arc<Swapchain>,
+    image: Arc<SwapchainImage>,
     image_id: u32,
+    // True if `flush()` has been called on the future, which means that the present command has
+    // been submitted.
+    flushed: AtomicBool,
+    // True if `signal_finished()` has been called on the future, which means that the future has
+    // been submitted and has already been processed by the GPU.
     finished: AtomicBool,
 }
@@ -641,6 +649,10 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
 
     #[inline]
     unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
+        if self.flushed.load(Ordering::SeqCst) {
+            return Ok(SubmitAnyBuilder::Empty);
+        }
+
         let queue = self.previous.queue().map(|q| q.clone());
 
         // TODO: if the swapchain image layout is not PRESENT, should add a transition command
@@ -675,11 +687,25 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
 
     #[inline]
     fn flush(&self) -> Result<(), FlushError> {
-        unimplemented!()
+        unsafe {
+            // If `flushed` already contains `true`, then `build_submission` will return `Empty`.
+
+            match self.build_submission()? {
+                SubmitAnyBuilder::Empty => {}
+                SubmitAnyBuilder::QueuePresent(present) => {
+                    present.submit(&self.queue)?;
+                }
+                _ => unreachable!()
+            }
+
+            self.flushed.store(true, Ordering::SeqCst);
+            Ok(())
+        }
     }
 
     #[inline]
     unsafe fn signal_finished(&self) {
+        self.flushed.store(true, Ordering::SeqCst);
         self.finished.store(true, Ordering::SeqCst);
         self.previous.signal_finished();
     }
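With the new `flushed` flag, `flush()` becomes idempotent: once the present command has been submitted, `build_submission` returns `SubmitAnyBuilder::Empty` and later flushes do nothing. The guard pattern in isolation looks roughly like this (toy type, not the actual vulkano code):

    use std::sync::atomic::{AtomicBool, Ordering};

    struct OneShotSubmit {
        flushed: AtomicBool,
    }

    impl OneShotSubmit {
        fn flush(&self) {
            // If the work was already submitted, there is nothing left to do.
            if self.flushed.load(Ordering::SeqCst) {
                return;
            }
            // Submit the work here (elided), then record that it has been flushed.
            self.flushed.store(true, Ordering::SeqCst);
        }
    }

    fn main() {
        let f = OneShotSubmit { flushed: AtomicBool::new(false) };
        f.flush(); // submits
        f.flush(); // no-op
    }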
@@ -703,14 +729,23 @@ unsafe impl<P> GpuFuture for PresentFuture<P> where P: GpuFuture {
     fn check_buffer_access(&self, buffer: &BufferAccess, exclusive: bool, queue: &Queue)
                            -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
     {
-        unimplemented!() // TODO: VK specs don't say whether it is legal to do that
+        debug_assert!(!buffer.conflicts_image_all(&self.image));
+        self.previous.check_buffer_access(buffer, exclusive, queue)
     }
 
     #[inline]
     fn check_image_access(&self, image: &ImageAccess, layout: ImageLayout, exclusive: bool, queue: &Queue)
                           -> Result<Option<(PipelineStages, AccessFlagBits)>, AccessCheckError>
     {
-        unimplemented!() // TODO: VK specs don't say whether it is legal to do that
+        if self.image.conflicts_image_all(&image) {
+            // This future presents the swapchain image, which "unlocks" it. Therefore any attempt
+            // to use this swapchain image afterwards shouldn't get granted automatic access.
+            // Instead any attempt to access the image afterwards should get an authorization from
+            // a later swapchain acquire future. Hence why we return `Unknown` here.
+            Err(AccessCheckError::Unknown)
+        } else {
+            self.previous.check_image_access(image, layout, exclusive, queue)
+        }
     }
 }
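Returning `AccessCheckError::Unknown` means "this future has no opinion about the resource"; the submit sync layer then falls back to locking the resource itself via `try_gpu_lock`, as in the `match` added earlier in this commit. A toy model of that decision (illustrative only, not the real vulkano types):

    // `Unknown` means "ask the resource itself", any other error is a hard denial.
    enum CheckResult { Granted, Denied, Unknown }

    fn acquire_access(future_says: CheckResult, try_gpu_lock: impl Fn() -> bool) -> bool {
        match future_says {
            // A future in the chain already guarantees access.
            CheckResult::Granted => true,
            // Nobody in the chain knows this resource: try to lock it directly.
            CheckResult::Unknown => try_gpu_lock(),
            // Access is explicitly forbidden (e.g. the image has already been presented).
            CheckResult::Denied => false,
        }
    }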
@@ -99,12 +99,25 @@ impl<F> FenceSignalFuture<F> where F: GpuFuture {
         let mut state = self.state.lock().unwrap();
 
         match *state {
-            FenceSignalFutureState::Flushed(_, ref fence) => {
+            FenceSignalFutureState::Flushed(ref mut prev, ref fence) => {
                 match fence.wait(Some(Duration::from_secs(0))) {
-                    Ok(()) => (),
-                    Err(_) => return,
+                    Ok(()) => unsafe {
+                        prev.signal_finished()
+                    },
+                    Err(_) => {
+                        prev.cleanup_finished();
+                        return
+                    },
                 }
             },
+            FenceSignalFutureState::Pending(ref mut prev, _) => {
+                prev.cleanup_finished();
+                return;
+            },
+            FenceSignalFutureState::PartiallyFlushed(ref mut prev, _) => {
+                prev.cleanup_finished();
+                return;
+            },
             _ => return,
         };
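The `Pending` and `PartiallyFlushed` arms forward `cleanup_finished()` to the previous future, so older frames can release their resources even while the current fence is still unsignalled. A toy model of that forwarding chain (not the real vulkano types):

    // Each per-frame future keeps its predecessor alive; cleanup walks down the chain.
    struct FrameFuture {
        previous: Option<Box<FrameFuture>>,
        fence_signalled: bool,
    }

    impl FrameFuture {
        fn cleanup_finished(&mut self) {
            if self.fence_signalled {
                // The GPU is done with this frame: everything it kept alive can go.
                self.previous = None;
            } else if let Some(prev) = self.previous.as_mut() {
                // Still pending: at least let the older frames clean up after themselves.
                prev.cleanup_finished();
            }
        }
    }

    fn main() {
        let older = FrameFuture { previous: None, fence_signalled: true };
        let mut current = FrameFuture { previous: Some(Box::new(older)), fence_signalled: false };
        // Even though `current` is still pending, the call reaches the older frame.
        current.cleanup_finished();
    }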
@@ -59,6 +59,8 @@ pub unsafe trait GpuFuture: DeviceOwned {
     /// It is the responsibility of the caller to ensure that the submission is going to be
     /// submitted only once. However keep in mind that this function can perfectly be called
     /// multiple times (as long as the returned object is only submitted once).
+    /// Also note that calling `flush()` on the future may change the value returned by
+    /// `build_submission()`.
     ///
     /// It is however the responsibility of the implementation to not return the same submission
     /// from multiple different future objects. For example if you implement `GpuFuture` on
@@ -197,9 +199,7 @@ pub unsafe trait GpuFuture: DeviceOwned {
     /// > function. If so, consider using `then_signal_fence_and_flush`.
     #[inline]
     fn then_signal_fence(self) -> FenceSignalFuture<Self> where Self: Sized {
-        fence_signal::then_signal_fence(self, FenceSignalFutureBehavior::Block {
-            timeout: None
-        })
+        fence_signal::then_signal_fence(self, FenceSignalFutureBehavior::Continue)
     }
 
     /// Signals a fence after this future. Returns another future that represents the signal.