// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// Welcome to the triangle example!
//
// This is the only example that is entirely detailed. All the other examples avoid code
// duplication by using helper functions.
//
// This example assumes that you are already more or less familiar with graphics programming
// and that you want to learn Vulkan. This means that for example it won't go into details about
// what a vertex or a shader is.
// The `vulkano` crate is the main crate that you need in order to use Vulkan.
#[macro_use]
extern crate vulkano;
// However, the Vulkan library doesn't provide any functionality to create and handle windows, as
// this would be out of scope. In order to open a window, we are going to use the `winit` crate.
extern crate winit;

// The `vulkano_win` crate is the link between `vulkano` and `winit`. Vulkano doesn't know about
// winit, and winit doesn't know about vulkano, so we import a crate that provides a link between
// the two.
extern crate vulkano_win;
use vulkano_win::VkSurfaceBuild;
use vulkano::buffer::BufferUsage;
use vulkano::buffer::CpuAccessibleBuffer;
use vulkano::command_buffer;
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::command_buffer::CommandBufferBuilder;
use vulkano::command_buffer::DynamicState;
use vulkano::descriptor::pipeline_layout::PipelineLayout;
use vulkano::descriptor::pipeline_layout::EmptyPipelineDesc;
use vulkano::device::Device;
use vulkano::framebuffer::Framebuffer;
use vulkano::framebuffer::Subpass;
use vulkano::instance::Instance;
use vulkano::pipeline::GraphicsPipeline;
use vulkano::pipeline::GraphicsPipelineParams;
use vulkano::pipeline::blend::Blend;
use vulkano::pipeline::depth_stencil::DepthStencil;
use vulkano::pipeline::input_assembly::InputAssembly;
use vulkano::pipeline::multisample::Multisample;
use vulkano::pipeline::vertex::SingleBufferDefinition;
use vulkano::pipeline::viewport::ViewportsState;
use vulkano::pipeline::viewport::Viewport;
use vulkano::pipeline::viewport::Scissor;
use vulkano::swapchain::SurfaceTransform;
use vulkano::swapchain::Swapchain;
use vulkano::sync::GpuFuture;

use std::sync::Arc;
use std::time::Duration;

fn main() {
    // The first step of any Vulkan program is to create an instance.
let instance = {
// When we create an instance, we have to pass a list of extensions that we want to enable.
//
        // All the window-drawing functionalities are part of non-core extensions that we need
        // to enable manually. To do so, we ask the `vulkano_win` crate for the list of extensions
        // required to draw to a window.
let extensions = vulkano_win::required_extensions();
// Now creating the instance.
Instance::new(None, &extensions, None).expect("failed to create Vulkan instance")
};
// We then choose which physical device to use.
//
// In a real application, there are three things to take into consideration:
//
    // - Some devices may not support some of the optional features that may be required by your
    //   application. You should filter out the devices that don't support your app.
//
// - Not all devices can draw to a certain surface. Once you create your window, you have to
// choose a device that is capable of drawing to it.
//
// - You probably want to leave the choice between the remaining devices to the user.
//
    // For the sake of the example we are just going to use the first device, which should work
    // most of the time. (A sketch of a more careful selection follows the code below.)
let physical = vulkano::instance::PhysicalDevice::enumerate(&instance)
.next().expect("no device available");
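
    // As an illustration only (not used in this example), a more careful selection could filter
    // the enumerated devices before picking one. The exact criteria are application-specific,
    // and the `geometry_shader` feature used below is just a placeholder assumption:
    //
    //     let physical = vulkano::instance::PhysicalDevice::enumerate(&instance)
    //         .filter(|p| p.supported_features().geometry_shader)
    //         .next().expect("no suitable device available");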
    // Some little debug info.
    println!("Using device: {} (type: {:?})", physical.name(), physical.ty());

// The objective of this example is to draw a triangle on a window. To do so, we first need to
// create the window.
//
// This is done by creating a `WindowBuilder` from the `winit` crate, then calling the
// `build_vk_surface` method provided by the `VkSurfaceBuild` trait from `vulkano_win`. If you
// ever get an error about `build_vk_surface` being undefined in one of your projects, this
// probably means that you forgot to import this trait.
//
    // This returns a `vulkano_win::Window` object that contains both a cross-platform winit
    // window and a cross-platform Vulkan surface that represents the surface of the window.
let window = winit::WindowBuilder::new().build_vk_surface(&instance).unwrap();
    // The next step is to choose which GPU queue will execute our draw commands.
//
    // Devices can provide multiple queues to run commands in parallel (for example a draw queue
    // and a compute queue), similar to CPU threads. This is something you have to manage
    // manually in Vulkan.
//
    // In a real-life application, we would probably use at least a graphics queue and a transfer
    // queue to handle data transfers in parallel. In this example we only use one queue.
//
// We have to choose which queues to use early on, because we will need this info very soon.
let queue = physical.queue_families().find(|q| {
// We take the first queue that supports drawing to our window.
q.supports_graphics() && window.surface().is_supported(q).unwrap_or(false)
}).expect("couldn't find a graphical queue family");
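
    // Purely as an illustration (not used in this example), a dedicated transfer queue family
    // could be looked up in a similar way. This is only a sketch and assumes that such a family
    // exists on the device; check the `QueueFamily` docs for the exact helper methods available:
    //
    //     let transfer_queue_family = physical.queue_families()
    //         .find(|q| q.supports_transfers() && !q.supports_graphics());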
// Now initializing the device. This is probably the most important object of Vulkan.
//
// We have to pass five parameters when creating a device:
//
// - Which physical device to connect to.
//
// - A list of optional features and extensions that our program needs to work correctly.
// Some parts of the Vulkan specs are optional and must be enabled manually at device
// creation. In this example the only thing we are going to need is the `khr_swapchain`
// extension that allows us to draw to a window.
//
// - A list of layers to enable. This is very niche, and you will usually pass `None`.
    //
    // - The list of queues that we are going to use. The exact parameter is an iterator whose
    //   items are `(QueueFamily, f32)` where the floating-point number represents the priority
    //   of the queue between 0.0 and 1.0. The priority of the queue is a hint to the
    //   implementation about how much it should prioritize queues between one another.
    //
    // The list of created queues is returned by the function alongside the device.
    let (device, mut queues) = {
let device_ext = vulkano::device::DeviceExtensions {
khr_swapchain: true,
.. vulkano::device::DeviceExtensions::none()
};
Device::new(&physical, physical.supported_features(), &device_ext,
                    [(queue, 0.5)].iter().cloned()).expect("failed to create device")
    };
    // Since we can request multiple queues, the `queues` variable is in fact an iterator. In this
    // example we use only one queue, so we just retrieve the first and only element of the
    // iterator.
let queue = queues.next().unwrap();
    // Before we can draw on the surface, we have to create what is called a swapchain. Creating
    // a swapchain allocates the color buffers that will contain the image that will ultimately
    // be visible on the screen. These images are returned alongside the swapchain.
    let (swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only
// pass values that are allowed by the capabilities.
let caps = window.surface().get_capabilities(&physical)
.expect("failed to get surface capabilities");
// We choose the dimensions of the swapchain to match the current dimensions of the window.
// If `caps.current_extent` is `None`, this means that the window size will be determined
// by the dimensions of the swapchain, in which case we just use a default value.
        let dimensions = caps.current_extent.unwrap_or([1280, 1024]);

// The present mode determines the way the images will be presented on the screen. This
// includes things such as vsync and will affect the framerate of your application. We just
// use the first supported value, but you probably want to leave that choice to the user.
let present = caps.present_modes.iter().next().unwrap();
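
        // As an aside (not done here), an application could also express a preference instead of
        // taking the first supported mode. The following is only a sketch and assumes that
        // `SupportedPresentModes` exposes boolean fields as described in the vulkano docs:
        //
        //     let present = if caps.present_modes.mailbox {
        //         vulkano::swapchain::PresentMode::Mailbox
        //     } else {
        //         vulkano::swapchain::PresentMode::Fifo
        //     };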
// The alpha mode indicates how the alpha value of the final image will behave. For example
        // you can choose whether the window will be opaque or transparent.
        let alpha = caps.supported_composite_alpha.iter().next().unwrap();

// Choosing the internal format that the images will have.
let format = caps.supported_formats[0].0;
// Please take a look at the docs for the meaning of the parameters we didn't mention.
Swapchain::new(&device, &window.surface(), caps.min_image_count, format, dimensions, 1,
                       &caps.supported_usage_flags, &queue, SurfaceTransform::Identity, alpha,
                       present, true, None).expect("failed to create swapchain")
};
// We now create a buffer that will store the shape of our triangle.
    let vertex_buffer = {
        #[derive(Debug, Clone)]
struct Vertex { position: [f32; 2] }
impl_vertex!(Vertex, position);
CpuAccessibleBuffer::from_iter(&device, &BufferUsage::all(), Some(queue.family()), [
Vertex { position: [-0.5, -0.25] },
Vertex { position: [0.0, 0.5] },
Vertex { position: [0.25, -0.1] }
].iter().cloned()).expect("failed to create buffer")
};
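
    // As a side note (not part of this example), the same `impl_vertex!` macro can describe
    // vertices with more than one attribute. For instance, a hypothetical vertex type with a
    // per-vertex color could be declared like this:
    //
    //     #[derive(Debug, Clone)]
    //     struct Vertex { position: [f32; 2], color: [f32; 3] }
    //     impl_vertex!(Vertex, position, color);
    //
    // The matching attributes would then also have to be declared in the vertex shader.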
    // The next step is to create the shaders.
//
// The shader creation API provided by the vulkano library is unsafe, for various reasons.
//
// Instead, in our build script we used the `vulkano-shaders` crate to parse our shaders at
// compile time and provide a safe wrapper over vulkano's API. See the `build.rs` file at the
// root of the crate. You can find the shaders' source code in the `triangle_fs.glsl` and
// `triangle_vs.glsl` files.
//
// The author knows that this system is crappy and that it would be far better to use a plugin.
// Unfortunately plugins aren't available in stable Rust yet.
//
    // The code generated by the build script creates a `Shader` struct in each module, which we
    // can now use to load the shaders.
//
// Because of some restrictions with the `include!` macro, we need to use a module.
mod vs { include!{concat!(env!("OUT_DIR"), "/shaders/src/bin/triangle_vs.glsl")} }
    let vs = vs::Shader::load(&device).expect("failed to create shader module");
    mod fs { include!{concat!(env!("OUT_DIR"), "/shaders/src/bin/triangle_fs.glsl")} }
let fs = fs::Shader::load(&device).expect("failed to create shader module");
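
    // For reference, the vertex shader of an example like this one is typically only a few lines
    // of GLSL. The following is just a sketch of what `triangle_vs.glsl` might contain; the files
    // in the repository are the authoritative source:
    //
    //     #version 450
    //     layout(location = 0) in vec2 position;
    //     void main() {
    //         gl_Position = vec4(position, 0.0, 1.0);
    //     }
    //
    // The fragment shader would similarly just write a constant color to its single output.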
    // At this point, OpenGL initialization would be finished. However in Vulkan it is not. OpenGL
    // implicitly does a lot of computation whenever you draw. In Vulkan, you have to do all this
    // manually.

    // The next step is to create a *render pass*, which is an object that describes where the
// output of the graphics pipeline will go. It describes the layout of the images
// where the colors, depth and/or stencil information will be written.
let render_pass = Arc::new(single_pass_renderpass!(device.clone(),
attachments: {
// `color` is a custom name we give to the first and only attachment.
color: {
// `load: Clear` means that we ask the GPU to clear the content of this
// attachment at the start of the drawing.
load: Clear,
// `store: Store` means that we ask the GPU to store the output of the draw
// in the actual image. We could also ask it to discard the result.
store: Store,
// `format: <ty>` indicates the type of the format of the image. This has to
// be one of the types of the `vulkano::format` module (or alternatively one
// of your structs that implements the `FormatDesc` trait). Here we use the
// generic `vulkano::format::Format` enum because we don't know the format in
// advance.
format: images[0].format(),
                // `samples: 1` means that we don't use any multisampling for this attachment.
samples: 1,
            }
},
pass: {
// We use the attachment named `color` as the one and only color attachment.
color: [color],
// No depth-stencil attachment is indicated with empty brackets.
depth_stencil: {}
        }
).unwrap());
// Before we draw we have to create what is called a pipeline. This is similar to an OpenGL
// program, but much more specific.
let pipeline = Arc::new(GraphicsPipeline::new(&device, GraphicsPipelineParams {
// We need to indicate the layout of the vertices.
// The type `SingleBufferDefinition` actually contains a template parameter corresponding
// to the type of each vertex. But in this code it is automatically inferred.
vertex_input: SingleBufferDefinition::new(),
// A Vulkan shader can in theory contain multiple entry points, so we have to specify
// which one. The `main` word of `main_entry_point` actually corresponds to the name of
// the entry point.
vertex_shader: vs.main_entry_point(),
        // `InputAssembly::triangle_list()` is a shortcut to build an `InputAssembly` struct that
        // describes a list of triangles.
input_assembly: InputAssembly::triangle_list(),
        // No tessellation shader.
        tessellation: None,

        // No geometry shader.
        geometry_shader: None,

        // TODO: switch to dynamic viewports and explain how it works
        // (a short note on dynamic viewports follows the pipeline creation below)
viewport: ViewportsState::Fixed {
            data: vec![(
                Viewport {
                    origin: [0.0, 0.0],
                    depth_range: 0.0 .. 1.0,
                    dimensions: [images[0].dimensions()[0] as f32,
                                 images[0].dimensions()[1] as f32],
                },
                Scissor::irrelevant()
            )],
},
// The `Raster` struct can be used to customize parameters such as polygon mode or backface
// culling.
        raster: Default::default(),

        // If we use multisampling, we can pass additional configuration.
        multisample: Multisample::disabled(),

        // See `vertex_shader`.
        fragment_shader: fs.main_entry_point(),
// `DepthStencil::disabled()` is a shortcut to build a `DepthStencil` struct that describes
// the fact that depth and stencil testing are disabled.
depth_stencil: DepthStencil::disabled(),
// `Blend::pass_through()` is a shortcut to build a `Blend` struct that describes the fact
// that colors must be directly transferred from the fragment shader output to the
// attachments without any change.
blend: Blend::pass_through(),
// We have to indicate which subpass of which render pass this pipeline is going to be used
// in. The pipeline will only be usable from this particular subpass.
        render_pass: Subpass::from(render_pass.clone(), 0).unwrap(),
}).unwrap());
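
    // Note on the viewport above: instead of a viewport that is fixed at pipeline creation, a
    // pipeline can also be created with dynamic viewports (see `ViewportsState` in the vulkano
    // docs). In that case the actual viewport would be supplied at draw time through the
    // `DynamicState` parameter of the draw call, instead of passing `DynamicState::none()`.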
// The render pass we created above only describes the layout of our framebuffers. Before we
// can draw we also need to create the actual framebuffers.
    //
    // Since we need to draw to multiple images, we are going to create a different framebuffer for
    // each image.
    let framebuffers = images.iter().map(|image| {
// When we create the framebuffer we need to pass the actual list of images for the
// framebuffer's attachments.
//
// The type of data that corresponds to this list depends on the way you created the
// render pass. With the `single_pass_renderpass!` macro you need to call
// `.desc().start_attachments()`. The returned object will have a method whose name is the
// name of the first attachment. When called, it returns an object that will have a method
// whose name is the name of the second attachment. And so on. Only the object returned
// by the method of the last attachment can be passed to `Framebuffer::new`.
let attachments = render_pass.desc().start_attachments().color(image.clone());
        // Actually creating the framebuffer. Note that we have to pass the dimensions of the
        // framebuffer. These dimensions must be less than or equal to the intersection of the
        // dimensions of all the attachments.
let dimensions = [image.dimensions()[0], image.dimensions()[1], 1];
        Framebuffer::new(render_pass.clone(), dimensions, attachments).unwrap()
}).collect::<Vec<_>>();
// Initialization is finally finished!
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
    // an object that implements the `GpuFuture` trait, which holds the resources for as long as
    // they are in use by the GPU.
    //
// Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
    // that, we store them in a `Vec` and clean them from time to time.
    let mut submissions: Vec<Box<GpuFuture>> = Vec::new();

loop {
// Clearing the old submissions by keeping alive only the ones which probably aren't
// finished.
while submissions.len() >= 4 {
submissions.remove(0);
}
// Before we can draw on the output, we have to *acquire* an image from the swapchain. If
// no image is available (which happens if you submit draw commands too quickly), then the
// function will block.
        // This operation returns the index of the image that we are allowed to draw upon, plus a
        // future that represents the moment when the image will actually become available.
        //
        // The parameter is a timeout after which the function call returns an error.
let (image_num, future) = swapchain.acquire_next_image(Duration::new(1, 0)).unwrap();
        // In order to draw, we have to build a *command buffer*. The command buffer object holds
// the list of commands that are going to be executed.
//
// Building a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to be
// optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The command
// buffer will only be executable on that given queue family.
        let command_buffer = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
            // Before we can draw, we have to *enter a render pass*. We do this by calling
            // `begin_render_pass`. The second parameter indicates whether the contents of the
            // render pass will be provided by secondary command buffers; that is a more advanced
            // topic not covered here, so we just pass `false`.
            //
// The third parameter builds the list of values to clear the attachments with. The API
// is similar to the list of attachments when building the framebuffers, except that
// only the attachments that use `load: Clear` appear in the list.
.begin_render_pass(framebuffers[image_num].clone(), false,
render_pass.desc().start_clear_values().color([0.0, 0.0, 1.0, 1.0]))
.unwrap()
// We are now inside the first subpass of the render pass. We add a draw command.
//
            // The last two parameters contain the list of resources to pass to the shaders.
            // Since our shaders don't use any descriptor sets or push constants, we just pass
            // empty tuples `()`.
.draw(pipeline.clone(), DynamicState::none(), vertex_buffer.clone(), (), ())
.unwrap()
            // We leave the render pass by calling `end_render_pass`. Note that if we had multiple
            // subpasses we would have had to jump to the next subpass before being able to leave
            // the render pass.
            .end_render_pass()
            .unwrap()

// Finish building the command buffer by calling `build`.
.build().unwrap();
let future = future
.then_execute(queue.clone(), command_buffer)
// The color output is now expected to contain our triangle. But in order to show it on
// the screen, we have to *present* the image by calling `present`.
//
// This function does not actually present the image immediately. Instead it submits a
// present command at the end of the queue. This means that it will only be presented once
// the GPU has finished executing the command buffer that draws the triangle.
.then_swapchain_present(queue.clone(), swapchain.clone(), image_num)
.then_signal_fence();
future.flush().unwrap();
submissions.push(Box::new(future) as Box<_>);
        // Note that in more complex programs it is likely that one of `acquire_next_image`,
        // `then_execute` or `then_swapchain_present` will block for some time. This happens when
        // the GPU's queue is full and the driver has to wait until the GPU has finished some work.
//
// Unfortunately the Vulkan API doesn't provide any way to not wait or to detect when a
// wait would happen. Blocking may be the desired behavior, but if you don't want to
// block you should spawn a separate thread dedicated to submissions.
// Handling the window events in order to close the program when the user wants to close
// it.
        for ev in window.window().poll_events() {
match ev {
winit::Event::Closed => return,
_ => ()
}
}
}
}