Mirror of https://gitlab.freedesktop.org/wlroots/wlroots.git

render/vulkan: implement render pass API

parent 8af00d5534
commit e07c77f846
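For orientation before the diff: the render pass API lets a compositor record a whole frame against a wlr_buffer through the generic wlr_render_pass interface, and this commit adds the Vulkan backend for it. The sketch below is illustrative only, not part of the commit; it assumes the public wrappers wlr_renderer_begin_buffer_pass(), wlr_render_pass_add_rect(), wlr_render_pass_add_texture() and wlr_render_pass_submit(), whose exact signatures may differ in the wlroots revision this commit targets.

// Illustrative compositor-side sketch (not part of this commit).
#include <wlr/render/pass.h>
#include <wlr/render/wlr_renderer.h>

static bool render_frame(struct wlr_renderer *renderer, struct wlr_buffer *buffer,
		struct wlr_texture *texture) {
	// For the Vulkan renderer this ends up in vulkan_begin_buffer_pass().
	struct wlr_render_pass *pass = wlr_renderer_begin_buffer_pass(renderer, buffer);
	if (pass == NULL) {
		return false;
	}

	// Clear the whole buffer to opaque black (render_pass_add_rect below).
	wlr_render_pass_add_rect(pass, &(struct wlr_render_rect_options){
		.box = { .width = buffer->width, .height = buffer->height },
		.color = { .r = 0, .g = 0, .b = 0, .a = 1 },
	});

	// Composite a client texture on top (render_pass_add_texture below).
	wlr_render_pass_add_texture(pass, &(struct wlr_render_texture_options){
		.texture = texture,
	});

	// Ends the Vulkan render pass, records barriers and submits the work
	// (render_pass_submit below).
	return wlr_render_pass_submit(pass);
}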
@@ -12,6 +12,7 @@
 #include <wlr/util/addon.h>
 
 struct wlr_vk_descriptor_pool;
+struct wlr_vk_texture;
 
 struct wlr_vk_instance {
 	VkInstance instance;
@@ -260,6 +261,13 @@ struct wlr_vk_renderer {
 	} read_pixels_cache;
 };
 
+// vertex shader push constant range data
+struct wlr_vk_vert_pcr_data {
+	float mat4[4][4];
+	float uv_off[2];
+	float uv_size[2];
+};
+
 // Creates a vulkan renderer for the given device.
 struct wlr_renderer *vulkan_renderer_create_for_device(struct wlr_vk_device *dev);
 
@@ -272,6 +280,18 @@ VkCommandBuffer vulkan_record_stage_cb(struct wlr_vk_renderer *renderer);
 // finished execution.
 bool vulkan_submit_stage_wait(struct wlr_vk_renderer *renderer);
 
+struct wlr_vk_render_pass {
+	struct wlr_render_pass base;
+	struct wlr_vk_renderer *renderer;
+	struct wlr_vk_render_buffer *render_buffer;
+	struct wlr_vk_command_buffer *command_buffer;
+	VkPipeline bound_pipeline;
+	float projection[9];
+};
+
+struct wlr_vk_render_pass *vulkan_begin_render_pass(struct wlr_vk_renderer *renderer,
+	struct wlr_vk_render_buffer *buffer);
+
 // Suballocates a buffer span with the given size that can be mapped
 // and used as staging buffer. The allocation is implicitly released when the
 // stage cb has finished execution. The start of the span will be a multiple
@@ -301,6 +321,19 @@ struct wlr_vk_renderer *vulkan_get_renderer(struct wlr_renderer *r);
 
 struct wlr_vk_pipeline_layout *vulkan_get_pipeline_layout(struct wlr_vk_renderer *renderer,
 	const struct wlr_vk_format *format);
+VkPipeline vulkan_get_texture_pipeline(struct wlr_vk_texture *texture,
+	struct wlr_vk_render_format_setup *render_setup);
+
+struct wlr_vk_command_buffer *vulkan_acquire_command_buffer(
+	struct wlr_vk_renderer *renderer);
+uint64_t vulkan_end_command_buffer(struct wlr_vk_command_buffer *cb,
+	struct wlr_vk_renderer *renderer);
+bool vulkan_wait_command_buffer(struct wlr_vk_command_buffer *cb,
+	struct wlr_vk_renderer *renderer);
+
+bool vulkan_sync_render_buffer(struct wlr_vk_renderer *renderer,
+	struct wlr_vk_render_buffer *render_buffer, struct wlr_vk_command_buffer *cb);
+bool vulkan_sync_foreign_texture(struct wlr_vk_texture *texture);
 
 // State (e.g. image texture) associated with a surface.
 struct wlr_vk_texture {
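The command-buffer helpers promoted to renderer-wide scope above (vulkan_acquire_command_buffer(), vulkan_end_command_buffer(), vulkan_wait_command_buffer()) follow an acquire, record, end, submit, wait lifecycle, where vulkan_end_command_buffer() hands back the timeline-semaphore point that the subsequent submission is expected to signal. A minimal illustrative sketch of that lifecycle; the helper below is hypothetical and not part of the commit, and real callers such as render_pass_submit() in pass.c batch the submission together with other work:

// Hypothetical internal caller inside the Vulkan renderer, for illustration only.
static bool example_record_and_wait(struct wlr_vk_renderer *renderer) {
	struct wlr_vk_command_buffer *cb = vulkan_acquire_command_buffer(renderer);
	if (cb == NULL) {
		return false;
	}

	// ... vkBeginCommandBuffer(cb->vk, ...) and vkCmd*() recording go here ...

	// Finish recording; returns the timeline point to signal, or 0 on error.
	uint64_t point = vulkan_end_command_buffer(cb, renderer);
	if (point == 0) {
		return false;
	}

	// ... submit cb->vk with vkQueueSubmit2KHR(), signaling
	// renderer->timeline_semaphore at `point` (see render_pass_submit()) ...

	// Block until the GPU has reached that timeline point.
	return vulkan_wait_command_buffer(cb, renderer);
}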
@@ -37,6 +37,7 @@ glslang_version_info = run_command(glslang, '--version', check: true).stdout()
 glslang_version = glslang_version_info.split('\n')[0].split(':')[-1]
 
 wlr_files += files(
+	'pass.c',
 	'renderer.c',
 	'texture.c',
 	'vulkan.c',
render/vulkan/pass.c (new file, 603 lines)
@@ -0,0 +1,603 @@
+#include <assert.h>
+#include <drm_fourcc.h>
+#include <stdlib.h>
+#include <wlr/util/log.h>
+
+#include "render/vulkan.h"
+#include "types/wlr_matrix.h"
+
+static const struct wlr_render_pass_impl render_pass_impl;
+
+static struct wlr_vk_render_pass *get_render_pass(struct wlr_render_pass *wlr_pass) {
+	assert(wlr_pass->impl == &render_pass_impl);
+	struct wlr_vk_render_pass *pass = wl_container_of(wlr_pass, pass, base);
+	return pass;
+}
+
+static void bind_pipeline(struct wlr_vk_render_pass *pass, VkPipeline pipeline) {
+	if (pipeline == pass->bound_pipeline) {
+		return;
+	}
+
+	vkCmdBindPipeline(pass->command_buffer->vk, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+	pass->bound_pipeline = pipeline;
+}
+
+static void get_clip_region(struct wlr_vk_render_pass *pass,
+		const pixman_region32_t *in, pixman_region32_t *out) {
+	if (in != NULL) {
+		pixman_region32_init(out);
+		pixman_region32_copy(out, in);
+	} else {
+		struct wlr_buffer *buffer = pass->render_buffer->wlr_buffer;
+		pixman_region32_init_rect(out, 0, 0, buffer->width, buffer->height);
+	}
+}
+
+static void convert_pixman_box_to_vk_rect(const pixman_box32_t *box, VkRect2D *rect) {
+	*rect = (VkRect2D){
+		.offset = { .x = box->x1, .y = box->y1 },
+		.extent = { .width = box->x2 - box->x1, .height = box->y2 - box->y1 },
+	};
+}
+
+static float color_to_linear(float non_linear) {
+	// See https://www.w3.org/Graphics/Color/srgb
+	return (non_linear > 0.04045) ?
+		pow((non_linear + 0.055) / 1.055, 2.4) :
+		non_linear / 12.92;
+}
+
+static void mat3_to_mat4(const float mat3[9], float mat4[4][4]) {
+	memset(mat4, 0, sizeof(float) * 16);
+	mat4[0][0] = mat3[0];
+	mat4[0][1] = mat3[1];
+	mat4[0][3] = mat3[2];
+
+	mat4[1][0] = mat3[3];
+	mat4[1][1] = mat3[4];
+	mat4[1][3] = mat3[5];
+
+	mat4[2][2] = 1.f;
+	mat4[3][3] = 1.f;
+}
+
+static bool render_pass_submit(struct wlr_render_pass *wlr_pass) {
+	struct wlr_vk_render_pass *pass = get_render_pass(wlr_pass);
+	struct wlr_vk_renderer *renderer = pass->renderer;
+	struct wlr_vk_command_buffer *render_cb = pass->command_buffer;
+	struct wlr_vk_render_buffer *render_buffer = pass->render_buffer;
+
+	if (vulkan_record_stage_cb(renderer) == VK_NULL_HANDLE) {
+		return false;
+	}
+
+	struct wlr_vk_command_buffer *stage_cb = renderer->stage.cb;
+	assert(stage_cb != NULL);
+	renderer->stage.cb = NULL;
+
+	if (render_buffer->blend_image) {
+		// Apply output shader to map blend image to actual output image
+		vkCmdNextSubpass(render_cb->vk, VK_SUBPASS_CONTENTS_INLINE);
+
+		VkPipeline pipe = render_buffer->render_setup->output_pipe;
+		if (pipe != renderer->bound_pipe) {
+			vkCmdBindPipeline(render_cb->vk, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe);
+			renderer->bound_pipe = pipe;
+		}
+
+		float final_matrix[9] = {
+			renderer->render_width, 0, -1,
+			0, renderer->render_height, -1,
+			0, 0, 0,
+		};
+		struct wlr_vk_vert_pcr_data vert_pcr_data = {
+			.uv_off = { 0, 0 },
+			.uv_size = { 1, 1 },
+		};
+		mat3_to_mat4(final_matrix, vert_pcr_data.mat4);
+
+		vkCmdPushConstants(render_cb->vk, renderer->output_pipe_layout,
+			VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(vert_pcr_data), &vert_pcr_data);
+		vkCmdBindDescriptorSets(render_cb->vk,
+			VK_PIPELINE_BIND_POINT_GRAPHICS, renderer->output_pipe_layout,
+			0, 1, &render_buffer->blend_descriptor_set, 0, NULL);
+
+		vkCmdDraw(render_cb->vk, 4, 1, 0, 0);
+	}
+
+	vkCmdEndRenderPass(render_cb->vk);
+
+	// insert acquire and release barriers for dmabuf-images
+	uint32_t barrier_count = wl_list_length(&renderer->foreign_textures) + 1;
+	VkImageMemoryBarrier *acquire_barriers = calloc(barrier_count, sizeof(VkImageMemoryBarrier));
+	VkImageMemoryBarrier *release_barriers = calloc(barrier_count, sizeof(VkImageMemoryBarrier));
+	VkSemaphoreSubmitInfoKHR *render_wait = calloc(barrier_count * WLR_DMABUF_MAX_PLANES, sizeof(VkSemaphoreSubmitInfoKHR));
+	if (acquire_barriers == NULL || release_barriers == NULL || render_wait == NULL) {
+		wlr_log_errno(WLR_ERROR, "Allocation failed");
+		free(acquire_barriers);
+		free(release_barriers);
+		free(render_wait);
+		return false;
+	}
+
+	struct wlr_vk_texture *texture, *tmp_tex;
+	size_t idx = 0;
+	uint32_t render_wait_len = 0;
+	wl_list_for_each_safe(texture, tmp_tex, &renderer->foreign_textures, foreign_link) {
+		VkImageLayout src_layout = VK_IMAGE_LAYOUT_GENERAL;
+		if (!texture->transitioned) {
+			src_layout = VK_IMAGE_LAYOUT_UNDEFINED;
+			texture->transitioned = true;
+		}
+
+		// acquire
+		acquire_barriers[idx] = (VkImageMemoryBarrier){
+			.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+			.srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
+			.dstQueueFamilyIndex = renderer->dev->queue_family,
+			.image = texture->image,
+			.oldLayout = src_layout,
+			.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+			.srcAccessMask = 0, // ignored anyways
+			.dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
+			.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+			.subresourceRange.layerCount = 1,
+			.subresourceRange.levelCount = 1,
+		};
+
+		// release
+		release_barriers[idx] = (VkImageMemoryBarrier){
+			.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+			.srcQueueFamilyIndex = renderer->dev->queue_family,
+			.dstQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
+			.image = texture->image,
+			.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+			.newLayout = VK_IMAGE_LAYOUT_GENERAL,
+			.srcAccessMask = VK_ACCESS_SHADER_READ_BIT,
+			.dstAccessMask = 0, // ignored anyways
+			.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+			.subresourceRange.layerCount = 1,
+			.subresourceRange.levelCount = 1,
+		};
+
+		++idx;
+
+		if (!vulkan_sync_foreign_texture(texture)) {
+			wlr_log(WLR_ERROR, "Failed to wait for foreign texture DMA-BUF fence");
+		} else {
+			for (size_t i = 0; i < WLR_DMABUF_MAX_PLANES; i++) {
+				if (texture->foreign_semaphores[i] != VK_NULL_HANDLE) {
+					assert(render_wait_len < barrier_count * WLR_DMABUF_MAX_PLANES);
+					render_wait[render_wait_len++] = (VkSemaphoreSubmitInfoKHR){
+						.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
+						.semaphore = texture->foreign_semaphores[i],
+						.stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR,
+					};
+				}
+			}
+		}
+
+		wl_list_remove(&texture->foreign_link);
+		texture->owned = false;
+	}
+
+	// also add acquire/release barriers for the current render buffer
+	VkImageLayout src_layout = VK_IMAGE_LAYOUT_GENERAL;
+	if (!render_buffer->transitioned) {
+		src_layout = VK_IMAGE_LAYOUT_PREINITIALIZED;
+		render_buffer->transitioned = true;
+	}
+
+	if (render_buffer->blend_image) {
+		// The render pass changes the blend image layout from
+		// color attachment to read only, so on each frame, before
+		// the render pass starts, we change it back
+		VkImageLayout blend_src_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+		if (!render_buffer->blend_transitioned) {
+			blend_src_layout = VK_IMAGE_LAYOUT_UNDEFINED;
+			render_buffer->blend_transitioned = true;
+		}
+
+		VkImageMemoryBarrier blend_acq_barrier = {
+			.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+			.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+			.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+			.image = render_buffer->blend_image,
+			.oldLayout = blend_src_layout,
+			.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+			.srcAccessMask = VK_ACCESS_SHADER_READ_BIT,
+			.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+			.subresourceRange = {
+				.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+				.layerCount = 1,
+				.levelCount = 1,
+			},
+		};
+		vkCmdPipelineBarrier(stage_cb->vk, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+			VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+			0, 0, NULL, 0, NULL, 1, &blend_acq_barrier);
+	}
+
+	// acquire render buffer before rendering
+	acquire_barriers[idx] = (VkImageMemoryBarrier){
+		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
+		.dstQueueFamilyIndex = renderer->dev->queue_family,
+		.image = render_buffer->image,
+		.oldLayout = src_layout,
+		.newLayout = VK_IMAGE_LAYOUT_GENERAL,
+		.srcAccessMask = 0, // ignored anyways
+		.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+		.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+		.subresourceRange.layerCount = 1,
+		.subresourceRange.levelCount = 1,
+	};
+
+	// release render buffer after rendering
+	release_barriers[idx] = (VkImageMemoryBarrier){
+		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+		.srcQueueFamilyIndex = renderer->dev->queue_family,
+		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
+		.image = render_buffer->image,
+		.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
+		.newLayout = VK_IMAGE_LAYOUT_GENERAL,
+		.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+			VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+		.dstAccessMask = 0, // ignored anyways
+		.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+		.subresourceRange.layerCount = 1,
+		.subresourceRange.levelCount = 1,
+	};
+
+	++idx;
+
+	vkCmdPipelineBarrier(stage_cb->vk, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+		VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+		0, 0, NULL, 0, NULL, barrier_count, acquire_barriers);
+
+	vkCmdPipelineBarrier(render_cb->vk, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+		VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL,
+		barrier_count, release_barriers);
+
+	free(acquire_barriers);
+	free(release_barriers);
+
+	// No semaphores needed here.
+	// We don't need a semaphore from the stage/transfer submission
+	// to the render submissions since they are on the same queue
+	// and we have a renderpass dependency for that.
+	uint64_t stage_timeline_point = vulkan_end_command_buffer(stage_cb, renderer);
+	if (stage_timeline_point == 0) {
+		return false;
+	}
+
+	VkCommandBufferSubmitInfoKHR stage_cb_info = {
+		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR,
+		.commandBuffer = stage_cb->vk,
+	};
+	VkSemaphoreSubmitInfoKHR stage_signal = {
+		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
+		.semaphore = renderer->timeline_semaphore,
+		.value = stage_timeline_point,
+	};
+	VkSubmitInfo2KHR stage_submit = {
+		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR,
+		.commandBufferInfoCount = 1,
+		.pCommandBufferInfos = &stage_cb_info,
+		.signalSemaphoreInfoCount = 1,
+		.pSignalSemaphoreInfos = &stage_signal,
+	};
+
+	VkSemaphoreSubmitInfoKHR stage_wait;
+	if (renderer->stage.last_timeline_point > 0) {
+		stage_wait = (VkSemaphoreSubmitInfoKHR){
+			.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
+			.semaphore = renderer->timeline_semaphore,
+			.value = renderer->stage.last_timeline_point,
+			.stageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR,
+		};
+
+		stage_submit.waitSemaphoreInfoCount = 1;
+		stage_submit.pWaitSemaphoreInfos = &stage_wait;
+	}
+
+	renderer->stage.last_timeline_point = stage_timeline_point;
+
+	uint64_t render_timeline_point = vulkan_end_command_buffer(render_cb, renderer);
+	if (render_timeline_point == 0) {
+		return false;
+	}
+
+	uint32_t render_signal_len = 1;
+	VkSemaphoreSubmitInfoKHR render_signal[2] = {0};
+	render_signal[0] = (VkSemaphoreSubmitInfoKHR){
+		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
+		.semaphore = renderer->timeline_semaphore,
+		.value = render_timeline_point,
+	};
+	if (renderer->dev->implicit_sync_interop) {
+		if (render_cb->binary_semaphore == VK_NULL_HANDLE) {
+			VkExportSemaphoreCreateInfo export_info = {
+				.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
+				.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+			};
+			VkSemaphoreCreateInfo semaphore_info = {
+				.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+				.pNext = &export_info,
+			};
+			VkResult res = vkCreateSemaphore(renderer->dev->dev, &semaphore_info,
+				NULL, &render_cb->binary_semaphore);
+			if (res != VK_SUCCESS) {
+				wlr_vk_error("vkCreateSemaphore", res);
+				return false;
+			}
+		}
+
+		render_signal[render_signal_len++] = (VkSemaphoreSubmitInfoKHR){
+			.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR,
+			.semaphore = render_cb->binary_semaphore,
+		};
+	}
+
+	VkCommandBufferSubmitInfoKHR render_cb_info = {
+		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR,
+		.commandBuffer = render_cb->vk,
+	};
+	VkSubmitInfo2KHR render_submit = {
+		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR,
+		.waitSemaphoreInfoCount = render_wait_len,
+		.pWaitSemaphoreInfos = render_wait,
+		.commandBufferInfoCount = 1,
+		.pCommandBufferInfos = &render_cb_info,
+		.signalSemaphoreInfoCount = render_signal_len,
+		.pSignalSemaphoreInfos = render_signal,
+	};
+
+	VkSubmitInfo2KHR submit_info[] = { stage_submit, render_submit };
+	VkResult res = renderer->dev->api.vkQueueSubmit2KHR(renderer->dev->queue, 2, submit_info, VK_NULL_HANDLE);
+	if (res == VK_ERROR_DEVICE_LOST) {
+		wlr_log(WLR_ERROR, "vkQueueSubmit failed with VK_ERROR_DEVICE_LOST");
+		wl_signal_emit_mutable(&renderer->wlr_renderer.events.lost, NULL);
+		return false;
+	} else if (res != VK_SUCCESS) {
+		wlr_vk_error("vkQueueSubmit", res);
+		return false;
+	}
+
+	free(render_wait);
+
+	struct wlr_vk_shared_buffer *stage_buf, *stage_buf_tmp;
+	wl_list_for_each_safe(stage_buf, stage_buf_tmp, &renderer->stage.buffers, link) {
+		if (stage_buf->allocs.size == 0) {
+			continue;
+		}
+		wl_list_remove(&stage_buf->link);
+		wl_list_insert(&stage_cb->stage_buffers, &stage_buf->link);
+	}
+
+	if (!vulkan_sync_render_buffer(renderer, render_buffer, render_cb)) {
+		return false;
+	}
+
+	wlr_buffer_unlock(render_buffer->wlr_buffer);
+	free(pass);
+	return true;
+}
+
+static void render_pass_add_rect(struct wlr_render_pass *wlr_pass,
+		const struct wlr_render_rect_options *options) {
+	struct wlr_vk_render_pass *pass = get_render_pass(wlr_pass);
+	struct wlr_vk_renderer *renderer = pass->renderer;
+	VkCommandBuffer cb = pass->command_buffer->vk;
+
+	// Input color values are given in sRGB space, the shader expects
+	// them in linear space. The shader does all computation in linear
+	// space and expects its inputs in linear space since it outputs
+	// colors in linear space as well (and vulkan then automatically
+	// does the conversion for our sRGB render targets).
+	float linear_color[] = {
+		color_to_linear(options->color.r),
+		color_to_linear(options->color.g),
+		color_to_linear(options->color.b),
+		options->color.a, // no conversion for alpha
+	};
+
+	pixman_region32_t clip;
+	get_clip_region(pass, options->clip, &clip);
+
+	int clip_rects_len;
+	const pixman_box32_t *clip_rects = pixman_region32_rectangles(&clip, &clip_rects_len);
+
+	switch (options->blend_mode) {
+	case WLR_RENDER_BLEND_MODE_PREMULTIPLIED:;
+		float proj[9], matrix[9];
+		wlr_matrix_identity(proj);
+		wlr_matrix_project_box(matrix, &options->box, WL_OUTPUT_TRANSFORM_NORMAL, 0, proj);
+		wlr_matrix_multiply(matrix, pass->projection, matrix);
+
+		struct wlr_vk_vert_pcr_data vert_pcr_data = {
+			.uv_off = { 0, 0 },
+			.uv_size = { 1, 1 },
+		};
+		mat3_to_mat4(matrix, vert_pcr_data.mat4);
+
+		bind_pipeline(pass, pass->render_buffer->render_setup->quad_pipe);
+		vkCmdPushConstants(cb, renderer->default_pipeline_layout.vk,
+			VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(vert_pcr_data), &vert_pcr_data);
+		vkCmdPushConstants(cb, renderer->default_pipeline_layout.vk,
+			VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(vert_pcr_data), sizeof(float) * 4,
+			linear_color);
+
+		for (int i = 0; i < clip_rects_len; i++) {
+			VkRect2D rect;
+			convert_pixman_box_to_vk_rect(&clip_rects[i], &rect);
+			vkCmdSetScissor(cb, 0, 1, &rect);
+			vkCmdDraw(cb, 4, 1, 0, 0);
+		}
+		break;
+	case WLR_RENDER_BLEND_MODE_NONE:;
+		VkClearAttachment clear_att = {
+			.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+			.colorAttachment = 0,
+			.clearValue.color.float32 = {
+				linear_color[0],
+				linear_color[1],
+				linear_color[2],
+				linear_color[3],
+			},
+		};
+		VkClearRect clear_rect = {
+			.rect = {
+				.offset = { options->box.x, options->box.y },
+				.extent = { options->box.width, options->box.height },
+			},
+			.layerCount = 1,
+		};
+		for (int i = 0; i < clip_rects_len; i++) {
+			VkRect2D rect;
+			convert_pixman_box_to_vk_rect(&clip_rects[i], &rect);
+			vkCmdSetScissor(cb, 0, 1, &rect);
+			vkCmdClearAttachments(cb, 1, &clear_att, 1, &clear_rect);
+		}
+		break;
+	}
+
+	pixman_region32_fini(&clip);
+}
+
+static void render_pass_add_texture(struct wlr_render_pass *wlr_pass,
+		const struct wlr_render_texture_options *options) {
+	struct wlr_vk_render_pass *pass = get_render_pass(wlr_pass);
+	struct wlr_vk_renderer *renderer = pass->renderer;
+	struct wlr_vk_render_buffer *render_buffer = pass->render_buffer;
+	VkCommandBuffer cb = pass->command_buffer->vk;
+
+	struct wlr_vk_texture *texture = vulkan_get_texture(options->texture);
+	assert(texture->renderer == renderer);
+
+	if (texture->dmabuf_imported && !texture->owned) {
+		// Store this texture in the list of textures that need to be
+		// acquired before rendering and released after rendering.
+		// We don't do it here immediately since barriers inside
+		// a renderpass are suboptimal (would require additional renderpass
+		// dependency and potentially multiple barriers) and it's
+		// better to issue one barrier for all used textures anyways.
+		texture->owned = true;
+		assert(texture->foreign_link.prev == NULL);
+		assert(texture->foreign_link.next == NULL);
+		wl_list_insert(&renderer->foreign_textures, &texture->foreign_link);
+	}
+
+	struct wlr_fbox src_box;
+	wlr_render_texture_options_get_src_box(options, &src_box);
+	struct wlr_box dst_box;
+	wlr_render_texture_options_get_dst_box(options, &dst_box);
+	float alpha = wlr_render_texture_options_get_alpha(options);
+
+	pixman_region32_t clip;
+	get_clip_region(pass, options->clip, &clip);
+
+	float proj[9], matrix[9];
+	wlr_matrix_identity(proj);
+	wlr_matrix_project_box(matrix, &dst_box, options->transform, 0, proj);
+	wlr_matrix_multiply(matrix, pass->projection, matrix);
+
+	struct wlr_vk_vert_pcr_data vert_pcr_data = {
+		.uv_off = {
+			src_box.x / options->texture->width,
+			src_box.y / options->texture->height,
+		},
+		.uv_size = {
+			src_box.width / options->texture->width,
+			src_box.height / options->texture->height,
+		},
+	};
+	mat3_to_mat4(matrix, vert_pcr_data.mat4);
+
+	VkPipelineLayout pipeline_layout = texture->pipeline_layout->vk;
+	VkPipeline pipeline = vulkan_get_texture_pipeline(texture, render_buffer->render_setup);
+
+	bind_pipeline(pass, pipeline);
+
+	vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS,
+		pipeline_layout, 0, 1, &texture->ds, 0, NULL);
+
+	vkCmdPushConstants(cb, pipeline_layout,
+		VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(vert_pcr_data), &vert_pcr_data);
+	vkCmdPushConstants(cb, pipeline_layout,
+		VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(vert_pcr_data), sizeof(float),
+		&alpha);
+
+	int clip_rects_len;
+	const pixman_box32_t *clip_rects = pixman_region32_rectangles(&clip, &clip_rects_len);
+	for (int i = 0; i < clip_rects_len; i++) {
+		VkRect2D rect;
+		convert_pixman_box_to_vk_rect(&clip_rects[i], &rect);
+		vkCmdSetScissor(cb, 0, 1, &rect);
+		vkCmdDraw(cb, 4, 1, 0, 0);
+	}
+
+	texture->last_used_cb = pass->command_buffer;
+}
+
+static const struct wlr_render_pass_impl render_pass_impl = {
+	.submit = render_pass_submit,
+	.add_rect = render_pass_add_rect,
+	.add_texture = render_pass_add_texture,
+};
+
+struct wlr_vk_render_pass *vulkan_begin_render_pass(struct wlr_vk_renderer *renderer,
+		struct wlr_vk_render_buffer *buffer) {
+	struct wlr_vk_render_pass *pass = calloc(1, sizeof(*pass));
+	if (pass == NULL) {
+		return NULL;
+	}
+	wlr_render_pass_init(&pass->base, &render_pass_impl);
+	pass->renderer = renderer;
+
+	struct wlr_vk_command_buffer *cb = vulkan_acquire_command_buffer(renderer);
+	if (cb == NULL) {
+		free(pass);
+		return NULL;
+	}
+
+	VkCommandBufferBeginInfo begin_info = {
+		.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+	};
+	VkResult res = vkBeginCommandBuffer(cb->vk, &begin_info);
+	if (res != VK_SUCCESS) {
+		wlr_vk_error("vkBeginCommandBuffer", res);
+		free(pass);
+		return NULL;
+	}
+
+	int width = buffer->wlr_buffer->width;
+	int height = buffer->wlr_buffer->height;
+	VkRect2D rect = { .extent = { width, height } };
+
+	VkRenderPassBeginInfo rp_info = {
+		.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+		.renderArea = rect,
+		.renderPass = buffer->render_setup->render_pass,
+		.framebuffer = buffer->framebuffer,
+		.clearValueCount = 0,
+	};
+	vkCmdBeginRenderPass(cb->vk, &rp_info, VK_SUBPASS_CONTENTS_INLINE);
+
+	vkCmdSetViewport(cb->vk, 0, 1, &(VkViewport){
+		.width = width,
+		.height = height,
+		.maxDepth = 1,
+	});
+
+	// matrix_projection() assumes a GL coordinate system so we need
+	// to pass WL_OUTPUT_TRANSFORM_FLIPPED_180 to adjust it for vulkan.
+	matrix_projection(pass->projection, width, height, WL_OUTPUT_TRANSFORM_FLIPPED_180);
+
+	wlr_buffer_lock(buffer->wlr_buffer);
+	pass->render_buffer = buffer;
+	pass->command_buffer = cb;
+	return pass;
+}
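For reference, color_to_linear() in the new file above implements the standard sRGB decoding (electro-optical transfer) function from the W3C note it links to; only the R, G and B channels are converted, and render_pass_add_rect() passes alpha through unchanged since it is not color-encoded. In math form:

f(c) = \begin{cases} c / 12.92 & \text{if } c \le 0.04045 \\ \left((c + 0.055) / 1.055\right)^{2.4} & \text{if } c > 0.04045 \end{cases}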
@@ -58,13 +58,6 @@ struct wlr_vk_renderer *vulkan_get_renderer(struct wlr_renderer *wlr_renderer) {
 static struct wlr_vk_render_format_setup *find_or_create_render_setup(
 	struct wlr_vk_renderer *renderer, VkFormat format, bool has_blending_buffer);
 
-// vertex shader push constant range data
-struct vert_pcr_data {
-	float mat4[4][4];
-	float uv_off[2];
-	float uv_size[2];
-};
-
 // https://www.w3.org/Graphics/Color/srgb
 static float color_to_linear(float non_linear) {
 	return (non_linear > 0.04045) ?
@@ -360,16 +353,9 @@ error_alloc:
 	};
 }
 
-static struct wlr_vk_command_buffer *acquire_command_buffer(
-	struct wlr_vk_renderer *renderer);
-static uint64_t end_command_buffer(struct wlr_vk_command_buffer *cb,
-	struct wlr_vk_renderer *renderer);
-static bool wait_command_buffer(struct wlr_vk_command_buffer *cb,
-	struct wlr_vk_renderer *renderer);
-
 VkCommandBuffer vulkan_record_stage_cb(struct wlr_vk_renderer *renderer) {
 	if (renderer->stage.cb == NULL) {
-		renderer->stage.cb = acquire_command_buffer(renderer);
+		renderer->stage.cb = vulkan_acquire_command_buffer(renderer);
 		if (renderer->stage.cb == NULL) {
 			return VK_NULL_HANDLE;
 		}
@@ -391,7 +377,7 @@ bool vulkan_submit_stage_wait(struct wlr_vk_renderer *renderer) {
 	struct wlr_vk_command_buffer *cb = renderer->stage.cb;
 	renderer->stage.cb = NULL;
 
-	uint64_t timeline_point = end_command_buffer(cb, renderer);
+	uint64_t timeline_point = vulkan_end_command_buffer(cb, renderer);
 	if (timeline_point == 0) {
 		return false;
 	}
@@ -418,7 +404,7 @@ bool vulkan_submit_stage_wait(struct wlr_vk_renderer *renderer) {
 	// NOTE: don't release stage allocations here since they may still be
 	// used for reading. Will be done next frame.
 
-	return wait_command_buffer(cb, renderer);
+	return vulkan_wait_command_buffer(cb, renderer);
 }
 
 struct wlr_vk_format_props *vulkan_format_props_from_drm(
@@ -456,7 +442,7 @@ static bool init_command_buffer(struct wlr_vk_command_buffer *cb,
 	return true;
 }
 
-static bool wait_command_buffer(struct wlr_vk_command_buffer *cb,
+bool vulkan_wait_command_buffer(struct wlr_vk_command_buffer *cb,
 		struct wlr_vk_renderer *renderer) {
 	VkResult res;
 
@@ -546,13 +532,13 @@ static struct wlr_vk_command_buffer *get_command_buffer(
 	}
 
 	// Block until a busy command buffer becomes available
-	if (!wait_command_buffer(wait, renderer)) {
+	if (!vulkan_wait_command_buffer(wait, renderer)) {
 		return NULL;
 	}
 	return wait;
 }
 
-static struct wlr_vk_command_buffer *acquire_command_buffer(
+struct wlr_vk_command_buffer *vulkan_acquire_command_buffer(
 		struct wlr_vk_renderer *renderer) {
 	struct wlr_vk_command_buffer *cb = get_command_buffer(renderer);
 	if (cb == NULL) {
@@ -565,7 +551,7 @@ static struct wlr_vk_command_buffer *acquire_command_buffer(
 	return cb;
 }
 
-static uint64_t end_command_buffer(struct wlr_vk_command_buffer *cb,
+uint64_t vulkan_end_command_buffer(struct wlr_vk_command_buffer *cb,
 		struct wlr_vk_renderer *renderer) {
 	assert(cb->recording);
 	cb->recording = false;
@@ -900,7 +886,7 @@ static bool vulkan_begin(struct wlr_renderer *wlr_renderer,
 	struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
 	assert(renderer->current_render_buffer);
 
-	struct wlr_vk_command_buffer *cb = acquire_command_buffer(renderer);
+	struct wlr_vk_command_buffer *cb = vulkan_acquire_command_buffer(renderer);
 	if (cb == NULL) {
 		return false;
 	}
@@ -949,7 +935,7 @@ static bool vulkan_begin(struct wlr_renderer *wlr_renderer,
 	return true;
 }
 
-static bool vulkan_sync_foreign_texture(struct wlr_vk_texture *texture) {
+bool vulkan_sync_foreign_texture(struct wlr_vk_texture *texture) {
 	struct wlr_vk_renderer *renderer = texture->renderer;
 	VkResult res;
 
@@ -1019,18 +1005,17 @@ static bool vulkan_sync_foreign_texture(struct wlr_vk_texture *texture) {
 	return true;
 }
 
-static bool vulkan_sync_render_buffer(struct wlr_vk_renderer *renderer,
-		struct wlr_vk_command_buffer *cb) {
+bool vulkan_sync_render_buffer(struct wlr_vk_renderer *renderer,
+		struct wlr_vk_render_buffer *render_buffer, struct wlr_vk_command_buffer *cb) {
 	VkResult res;
 
 	if (!renderer->dev->implicit_sync_interop) {
 		// We have no choice but to block here sadly
-		return wait_command_buffer(cb, renderer);
+		return vulkan_wait_command_buffer(cb, renderer);
 	}
 
 	struct wlr_dmabuf_attributes dmabuf = {0};
-	if (!wlr_buffer_get_dmabuf(renderer->current_render_buffer->wlr_buffer,
-			&dmabuf)) {
+	if (!wlr_buffer_get_dmabuf(render_buffer->wlr_buffer, &dmabuf)) {
 		wlr_log(WLR_ERROR, "wlr_buffer_get_dmabuf failed");
 		return false;
 	}
@@ -1095,7 +1080,7 @@ static void vulkan_end(struct wlr_renderer *wlr_renderer) {
 		0.f, renderer->render_height, -1.f,
 		0.f, 0.f, 0.f,
 	};
-	struct vert_pcr_data vert_pcr_data;
+	struct wlr_vk_vert_pcr_data vert_pcr_data;
 	mat3_to_mat4(final_matrix, vert_pcr_data.mat4);
 	vert_pcr_data.uv_off[0] = 0.f;
 	vert_pcr_data.uv_off[1] = 0.f;
@@ -1275,7 +1260,7 @@ static void vulkan_end(struct wlr_renderer *wlr_renderer) {
 	// We don't need a semaphore from the stage/transfer submission
 	// to the render submissions since they are on the same queue
 	// and we have a renderpass dependency for that.
-	uint64_t stage_timeline_point = end_command_buffer(stage_cb, renderer);
+	uint64_t stage_timeline_point = vulkan_end_command_buffer(stage_cb, renderer);
 	if (stage_timeline_point == 0) {
 		return;
 	}
@@ -1312,7 +1297,7 @@ static void vulkan_end(struct wlr_renderer *wlr_renderer) {
 
 	renderer->stage.last_timeline_point = stage_timeline_point;
 
-	uint64_t render_timeline_point = end_command_buffer(render_cb, renderer);
+	uint64_t render_timeline_point = vulkan_end_command_buffer(render_cb, renderer);
 	if (render_timeline_point == 0) {
 		return;
 	}
@@ -1384,12 +1369,12 @@ static void vulkan_end(struct wlr_renderer *wlr_renderer) {
 		wl_list_insert(&stage_cb->stage_buffers, &stage_buf->link);
 	}
 
-	if (!vulkan_sync_render_buffer(renderer, render_cb)) {
+	if (!vulkan_sync_render_buffer(renderer, renderer->current_render_buffer, render_cb)) {
 		return;
 	}
 }
 
-static VkPipeline get_texture_pipeline(struct wlr_vk_texture *texture,
+VkPipeline vulkan_get_texture_pipeline(struct wlr_vk_texture *texture,
 		struct wlr_vk_render_format_setup *render_setup) {
 	if (texture->format->is_ycbcr) {
 		size_t pipeline_layout_index = texture->pipeline_layout - texture->renderer->ycbcr_pipeline_layouts;
@@ -1426,7 +1411,7 @@ static bool vulkan_render_subtexture_with_matrix(struct wlr_renderer *wlr_render
 	}
 
 	VkPipelineLayout pipe_layout = texture->pipeline_layout->vk;
-	VkPipeline pipe = get_texture_pipeline(texture, renderer->current_render_buffer->render_setup);
+	VkPipeline pipe = vulkan_get_texture_pipeline(texture, renderer->current_render_buffer->render_setup);
 
 	if (pipe != renderer->bound_pipe) {
 		vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe);
@@ -1439,7 +1424,7 @@ static bool vulkan_render_subtexture_with_matrix(struct wlr_renderer *wlr_render
 	float final_matrix[9];
 	wlr_matrix_multiply(final_matrix, renderer->projection, matrix);
 
-	struct vert_pcr_data vert_pcr_data;
+	struct wlr_vk_vert_pcr_data vert_pcr_data;
 	mat3_to_mat4(final_matrix, vert_pcr_data.mat4);
 
 	vert_pcr_data.uv_off[0] = box->x / wlr_texture->width;
@@ -1529,7 +1514,7 @@ static void vulkan_render_quad_with_matrix(struct wlr_renderer *wlr_renderer,
 	float final_matrix[9];
 	wlr_matrix_multiply(final_matrix, renderer->projection, matrix);
 
-	struct vert_pcr_data vert_pcr_data;
+	struct wlr_vk_vert_pcr_data vert_pcr_data;
 	mat3_to_mat4(final_matrix, vert_pcr_data.mat4);
 	vert_pcr_data.uv_off[0] = 0.f;
 	vert_pcr_data.uv_off[1] = 0.f;
@@ -1915,6 +1900,24 @@ static uint32_t vulkan_get_render_buffer_caps(struct wlr_renderer *wlr_renderer)
 	return WLR_BUFFER_CAP_DMABUF;
 }
 
+static struct wlr_render_pass *vulkan_begin_buffer_pass(struct wlr_renderer *wlr_renderer, struct wlr_buffer *buffer) {
+	struct wlr_vk_renderer *renderer = vulkan_get_renderer(wlr_renderer);
+
+	struct wlr_vk_render_buffer *render_buffer = get_render_buffer(renderer, buffer);
+	if (!render_buffer) {
+		render_buffer = create_render_buffer(renderer, buffer);
+		if (!render_buffer) {
+			return NULL;
+		}
+	}
+
+	struct wlr_vk_render_pass *render_pass = vulkan_begin_render_pass(renderer, render_buffer);
+	if (render_pass == NULL) {
+		return NULL;
+	}
+	return &render_pass->base;
+}
+
 static const struct wlr_renderer_impl renderer_impl = {
 	.bind_buffer = vulkan_bind_buffer,
 	.begin = vulkan_begin,
@@ -1932,6 +1935,7 @@ static const struct wlr_renderer_impl renderer_impl = {
 	.get_drm_fd = vulkan_get_drm_fd,
 	.get_render_buffer_caps = vulkan_get_render_buffer_caps,
 	.texture_from_buffer = vulkan_texture_from_buffer,
+	.begin_buffer_pass = vulkan_begin_buffer_pass,
 };
 
 static bool init_sampler(struct wlr_vk_renderer *renderer, VkSampler *sampler,
@@ -1996,7 +2000,7 @@ static bool init_tex_layouts(struct wlr_vk_renderer *renderer,
 
 	VkPushConstantRange pc_ranges[2] = {
 		{
-			.size = sizeof(struct vert_pcr_data),
+			.size = sizeof(struct wlr_vk_vert_pcr_data),
 			.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
 		},
 		{
@@ -2052,7 +2056,7 @@ static bool init_blend_to_output_layouts(struct wlr_vk_renderer *renderer,
 	// pipeline layout -- standard vertex uniforms, no shader uniforms
 	VkPushConstantRange pc_ranges[1] = {
 		{
-			.size = sizeof(struct vert_pcr_data),
+			.size = sizeof(struct wlr_vk_vert_pcr_data),
 			.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
 		},
 	};