mirror of
https://github.com/slendidev/lunar.git
synced 2025-12-18 13:09:51 +02:00
#define VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
#include "VulkanRenderer.h"

#include <cassert>
#include <cmath>
#include <cstring>
#include <format>
#include <iostream>
#include <optional>
#include <print>
#include <stdexcept>
#include <type_traits>
#include <utility>

#include <SDL3/SDL_video.h>
#include <SDL3/SDL_vulkan.h>
#include <VkBootstrap.h>
#include <imgui_impl_sdl3.h>
#include <imgui_impl_vulkan.h>

#include "DescriptorLayoutBuilder.h"
#include "DescriptorWriter.h"
#include "GraphicsPipelineBuilder.h"
#include "Util.h"

VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE

namespace Lunar {

VulkanRenderer::GL::GL(VulkanRenderer &renderer)
    : m_renderer(renderer)
{
}

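// Starts a dynamic-rendering pass into color_target (rendering into the MSAA
// target and resolving when multisampling is enabled), sets a full-extent
// viewport/scissor and resets the immediate-mode vertex state.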
auto VulkanRenderer::GL::begin_drawing(vk::CommandBuffer cmd,
    AllocatedImage &color_target, AllocatedImage *depth_target) -> void
{
    if (m_drawing) {
        end_drawing();
    }

    m_cmd = cmd;
    m_color_target = &color_target;
    m_depth_target = depth_target;
    m_vertices.clear();
    m_indices.clear();
    m_inside_primitive = false;
    m_drawing = true;
    m_active_pipeline = &m_renderer.m_vk.mesh_pipeline;
    m_transform = smath::Mat4::identity();
    m_current_color = { 1.0f, 1.0f, 1.0f, 1.0f };
    m_current_normal = { 0.0f, 0.0f, 1.0f };
    m_current_uv = { 0.0f, 0.0f };
    m_bound_texture = &m_renderer.m_vk.error_image;

    auto const extent = vk::Extent2D {
        m_color_target->extent.width,
        m_color_target->extent.height,
    };

    vk::RenderingAttachmentInfo color_att {};
    vk::ClearValue clear {};
    clear.color = vk::ClearColorValue {
        smath::Vec4 { Colors::DARK_SLATE_GRAY, 1.0f },
    };
    if (m_renderer.m_vk.msaa_samples != vk::SampleCountFlagBits::e1) {
        assert(m_renderer.m_vk.msaa_color_image.image_view
            && "MSAA enabled but MSAA color image is missing");
        color_att = vkinit::attachment_info(
            m_renderer.m_vk.msaa_color_image.image_view, &clear,
            vk::ImageLayout::eColorAttachmentOptimal);
        color_att.resolveMode = vk::ResolveModeFlagBits::eAverage;
        color_att.resolveImageView = m_color_target->image_view;
        color_att.resolveImageLayout = vk::ImageLayout::eColorAttachmentOptimal;
        color_att.storeOp = vk::AttachmentStoreOp::eDontCare;
    } else {
        color_att = vkinit::attachment_info(m_color_target->image_view, &clear,
            vk::ImageLayout::eColorAttachmentOptimal);
    }
    std::optional<vk::RenderingAttachmentInfo> depth_att;
    if (m_depth_target) {
        depth_att = vkinit::depth_attachment_info(m_depth_target->image_view,
            vk::ImageLayout::eDepthAttachmentOptimal);
    }

    auto render_info { vkinit::render_info(
        extent, &color_att, depth_att ? &*depth_att : nullptr) };
    m_cmd.beginRendering(render_info);

    vk::Viewport viewport {};
    viewport.x = 0.0f;
    viewport.y = 0.0f;
    viewport.width = static_cast<float>(extent.width);
    viewport.height = static_cast<float>(extent.height);
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    m_cmd.setViewport(0, viewport);

    vk::Rect2D scissor {};
    scissor.offset.x = 0;
    scissor.offset.y = 0;
    scissor.extent = extent;
    m_cmd.setScissor(0, scissor);

    bind_pipeline_if_needed();
}

auto VulkanRenderer::GL::end_drawing() -> void
{
    if (!m_drawing)
        return;

    if (m_inside_primitive) {
        end();
    }

    flush();
    m_cmd.endRendering();

    m_cmd = nullptr;
    m_color_target = nullptr;
    m_depth_target = nullptr;
    m_drawing = false;
    m_active_pipeline = nullptr;
}

auto VulkanRenderer::GL::begin(GeometryKind kind) -> void
{
    assert(m_drawing && "begin_drawing must be called first");
    if (m_inside_primitive) {
        end();
    }

    m_current_kind = kind;
    m_primitive_start = m_vertices.size();
    m_inside_primitive = true;
}

auto VulkanRenderer::GL::color(smath::Vec3 const &rgb) -> void
{
    m_current_color = smath::Vec4 { rgb, 1.0f };
}

auto VulkanRenderer::GL::color(smath::Vec4 const &rgba) -> void
{
    m_current_color = rgba;
}

auto VulkanRenderer::GL::uv(smath::Vec2 const &uv) -> void
{
    m_current_uv = uv;
}

auto VulkanRenderer::GL::normal(smath::Vec3 const &normal) -> void
{
    m_current_normal = normal;
}

auto VulkanRenderer::GL::set_texture(
    std::optional<AllocatedImage const *> texture) -> void
{
    assert(m_drawing && "begin_drawing must be called first");
    flush();
    m_bound_texture = texture.value_or(&m_renderer.m_vk.error_image);
}

auto VulkanRenderer::GL::end() -> void
{
    if (!m_inside_primitive)
        return;

    auto const count = m_vertices.size() - m_primitive_start;
    emit_indices(m_primitive_start, count);
    m_inside_primitive = false;
}

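// Uploads the vertices/indices recorded so far through a staging buffer into
// per-draw GPU buffers, binds the current texture descriptor and push
// constants for the mesh pipeline, and issues a single indexed draw; the
// temporary buffers are handed to the frame's deletion queue.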
auto VulkanRenderer::GL::flush() -> void
{
    if (!m_drawing || m_vertices.empty() || m_indices.empty())
        return;

    auto const vertex_data_size { m_vertices.size() * sizeof(Vertex) };
    auto const index_data_size { m_indices.size() * sizeof(uint32_t) };
    auto const staging_size { vertex_data_size + index_data_size };

    auto staging = m_renderer.create_buffer(staging_size,
        vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY);

    void *staging_dst = staging.info.pMappedData;
    bool staging_mapped_here { false };
    if (!staging_dst) {
        VkResult res = vmaMapMemory(
            m_renderer.m_vk.allocator, staging.allocation, &staging_dst);
        assert(res == VK_SUCCESS);
        staging_mapped_here = true;
    }
    memcpy(staging_dst, m_vertices.data(), vertex_data_size);
    memcpy(reinterpret_cast<uint8_t *>(staging_dst) + vertex_data_size,
        m_indices.data(), index_data_size);
    if (staging_mapped_here) {
        vmaUnmapMemory(m_renderer.m_vk.allocator, staging.allocation);
    }

    auto vertex_buffer { m_renderer.create_buffer(vertex_data_size,
        vk::BufferUsageFlagBits::eVertexBuffer
            | vk::BufferUsageFlagBits::eTransferDst
            | vk::BufferUsageFlagBits::eShaderDeviceAddress,
        VMA_MEMORY_USAGE_GPU_ONLY) };
    auto index_buffer { m_renderer.create_buffer(index_data_size,
        vk::BufferUsageFlagBits::eIndexBuffer
            | vk::BufferUsageFlagBits::eTransferDst,
        VMA_MEMORY_USAGE_GPU_ONLY) };

    m_renderer.immediate_submit(
        [&](vk::CommandBuffer cmd) {
            vk::BufferCopy vertex_copy {};
            vertex_copy.srcOffset = 0;
            vertex_copy.dstOffset = 0;
            vertex_copy.size = vertex_data_size;
            cmd.copyBuffer(
                staging.buffer, vertex_buffer.buffer, 1, &vertex_copy);

            vk::BufferCopy index_copy {};
            index_copy.srcOffset = vertex_data_size;
            index_copy.dstOffset = 0;
            index_copy.size = index_data_size;
            cmd.copyBuffer(staging.buffer, index_buffer.buffer, 1, &index_copy);
        },
        /*flush_frame_deletion_queue=*/false,
        /*clear_frame_descriptors=*/false);

    m_renderer.destroy_buffer(staging);

    auto cmd { m_cmd };

    bind_pipeline_if_needed();

    if (m_active_pipeline == &m_renderer.m_vk.mesh_pipeline) {
        auto const image_set {
            m_renderer.m_vk.get_current_frame().frame_descriptors.allocate(
                m_renderer.m_logger, m_renderer.m_vkb.dev.device,
                m_renderer.m_vk.single_image_descriptor_layout)
        };

        auto const *image
            = m_bound_texture ? m_bound_texture : &m_renderer.m_vk.error_image;
        DescriptorWriter()
            .write_image(0, image->image_view,
                m_renderer.m_vk.default_sampler_nearest.get(),
                static_cast<VkImageLayout>(
                    vk::ImageLayout::eShaderReadOnlyOptimal),
                VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
            .update_set(m_renderer.m_vkb.dev.device, image_set);

        auto vk_image_set = vk::DescriptorSet { image_set };
        cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics,
            m_renderer.m_vk.mesh_pipeline.get_layout(), 0, vk_image_set, {});

        GPUDrawPushConstants push_constants {};
        push_constants.world_matrix = m_transform;

        vk::BufferDeviceAddressInfo device_address_info {};
        device_address_info.buffer = vertex_buffer.buffer;
        push_constants.vertex_buffer
            = m_renderer.m_device.getBufferAddress(device_address_info);

        cmd.pushConstants(m_renderer.m_vk.mesh_pipeline.get_layout(),
            vk::ShaderStageFlagBits::eVertex, 0, sizeof(push_constants),
            &push_constants);
    }

    cmd.bindIndexBuffer(index_buffer.buffer, 0, vk::IndexType::eUint32);
    cmd.drawIndexed(static_cast<uint32_t>(m_indices.size()), 1, 0, 0, 0);

    m_renderer.m_vk.get_current_frame().deletion_queue.emplace([=, this]() {
        m_renderer.destroy_buffer(index_buffer);
        m_renderer.destroy_buffer(vertex_buffer);
    });

    m_vertices.clear();
    m_indices.clear();
}

auto VulkanRenderer::GL::use_pipeline(Pipeline &pipeline) -> void
{
    if (&pipeline == m_active_pipeline) {
        return;
    }

    flush();

    m_active_pipeline = &pipeline;
    bind_pipeline_if_needed();
}

auto VulkanRenderer::GL::set_transform(smath::Mat4 const &transform) -> void
{
    flush();
    m_transform = transform;
}

auto VulkanRenderer::GL::draw_rectangle(smath::Vec2 pos, smath::Vec2 size,
    smath::Vec4 rect_color, float rotation) -> void
{
    auto const half_size = size * 0.5f;
    auto const center = pos + half_size;

    auto rotate = [&](smath::Vec2 const &p) {
        float const c = std::cos(rotation);
        float const s = std::sin(rotation);
        return smath::Vec2 { c * p.x() - s * p.y(), s * p.x() + c * p.y() };
    };

    auto const br
        = center + rotate(smath::Vec2 { half_size.x(), -half_size.y() });
    auto const tr
        = center + rotate(smath::Vec2 { half_size.x(), half_size.y() });
    auto const bl
        = center + rotate(smath::Vec2 { -half_size.x(), -half_size.y() });
    auto const tl
        = center + rotate(smath::Vec2 { -half_size.x(), half_size.y() });

    begin(GeometryKind::Quads);

    color(rect_color);
    uv(smath::Vec2 { 1.0f, 1.0f });
    vert(smath::Vec3 { br.x(), br.y(), 0.0f });

    color(rect_color);
    uv(smath::Vec2 { 1.0f, 0.0f });
    vert(smath::Vec3 { tr.x(), tr.y(), 0.0f });

    color(rect_color);
    uv(smath::Vec2 { 0.0f, 1.0f });
    vert(smath::Vec3 { bl.x(), bl.y(), 0.0f });

    color(rect_color);
    uv(smath::Vec2 { 0.0f, 0.0f });
    vert(smath::Vec3 { tl.x(), tl.y(), 0.0f });

    end();
}

auto VulkanRenderer::GL::draw_mesh(GPUMeshBuffers const &mesh,
    smath::Mat4 const &transform, uint32_t index_count, uint32_t first_index,
    int32_t vertex_offset) -> void
{
    assert(m_drawing && "begin_drawing must be called first");

    flush();
    use_pipeline(m_renderer.m_vk.mesh_pipeline);

    auto const image_set {
        m_renderer.m_vk.get_current_frame().frame_descriptors.allocate(
            m_renderer.m_logger, m_renderer.m_vkb.dev.device,
            m_renderer.m_vk.single_image_descriptor_layout)
    };
    auto const *image
        = m_bound_texture ? m_bound_texture : &m_renderer.m_vk.error_image;
    DescriptorWriter()
        .write_image(0, image->image_view,
            m_renderer.m_vk.default_sampler_nearest.get(),
            static_cast<VkImageLayout>(vk::ImageLayout::eShaderReadOnlyOptimal),
            VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        .update_set(m_renderer.m_vkb.dev.device, image_set);

    auto vk_image_set = vk::DescriptorSet { image_set };
    m_cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics,
        m_renderer.m_vk.mesh_pipeline.get_layout(), 0, vk_image_set, {});

    GPUDrawPushConstants push_constants {};
    push_constants.world_matrix = transform;
    push_constants.vertex_buffer = mesh.vertex_buffer_address;

    m_cmd.pushConstants(m_renderer.m_vk.mesh_pipeline.get_layout(),
        vk::ShaderStageFlagBits::eVertex, 0, sizeof(push_constants),
        &push_constants);

    m_cmd.bindIndexBuffer(mesh.index_buffer.buffer, 0, vk::IndexType::eUint32);
    m_cmd.drawIndexed(index_count, 1, first_index, vertex_offset, 0);
}

auto VulkanRenderer::GL::push_vertex(smath::Vec3 const &pos) -> void
{
    assert(m_drawing && "begin_drawing must be called first");

    Vertex v {};
    v.position = pos;
    v.u = m_current_uv.x();
    v.v = m_current_uv.y();
    v.normal = m_current_normal;
    v.color = m_current_color;

    m_vertices.emplace_back(v);
}

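// Expands the vertices recorded since begin() into triangle-list indices for
// the current primitive kind (triangles, strip, fan or quads).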
auto VulkanRenderer::GL::emit_indices(size_t start, size_t count) -> void
{
    switch (m_current_kind) {
    case GeometryKind::Triangles: {
        for (size_t i = 0; (i + 2) < count; i += 3) {
            m_indices.emplace_back(static_cast<uint32_t>(start + i + 0));
            m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
            m_indices.emplace_back(static_cast<uint32_t>(start + i + 2));
        }
        break;
    }
    case GeometryKind::TriangleStrip: {
        if (count < 3)
            break;
        for (size_t i = 0; (i + 2) < count; i++) {
            if (i % 2 == 0) {
                m_indices.emplace_back(static_cast<uint32_t>(start + i + 0));
                m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
                m_indices.emplace_back(static_cast<uint32_t>(start + i + 2));
            } else {
                m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
                m_indices.emplace_back(static_cast<uint32_t>(start + i + 0));
                m_indices.emplace_back(static_cast<uint32_t>(start + i + 2));
            }
        }
        break;
    }
    case GeometryKind::TriangleFan: {
        if (count < 3)
            break;
        for (size_t i = 1; (i + 1) < count; i++) {
            m_indices.emplace_back(static_cast<uint32_t>(start));
            m_indices.emplace_back(static_cast<uint32_t>(start + i));
            m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
        }
        break;
    }
    case GeometryKind::Quads: {
        if (count < 4)
            break;

        size_t const quad_count { count / 4 };
        for (size_t q = 0; q < quad_count; q++) {
            size_t const base = start + q * 4;
            m_indices.emplace_back(static_cast<uint32_t>(base + 0));
            m_indices.emplace_back(static_cast<uint32_t>(base + 1));
            m_indices.emplace_back(static_cast<uint32_t>(base + 2));

            m_indices.emplace_back(static_cast<uint32_t>(base + 2));
            m_indices.emplace_back(static_cast<uint32_t>(base + 1));
            m_indices.emplace_back(static_cast<uint32_t>(base + 3));
        }
        break;
    }
    }
}

auto VulkanRenderer::GL::bind_pipeline_if_needed() -> void
{
    if (!m_drawing || !m_active_pipeline)
        return;

    m_cmd.bindPipeline(
        vk::PipelineBindPoint::eGraphics, m_active_pipeline->get());
}

VulkanRenderer::VulkanRenderer(SDL_Window *window, Logger &logger)
    : gl(*this)
    , m_window(window)
    , m_logger(logger)
{
    if (m_window == nullptr) {
        throw std::runtime_error("VulkanRenderer requires a valid window");
    }

    vk_init();
    swapchain_init();
    commands_init();
    sync_init();
    descriptors_init();
    pipelines_init();
    default_data_init();
    imgui_init();
}

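// Destroys per-frame objects, pipelines, samplers, render targets and the
// swapchain, then the allocator, surface, device and instance.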
VulkanRenderer::~VulkanRenderer()
{
    m_device.waitIdle();

    for (auto &frame_data : m_vk.frames) {
        frame_data.deletion_queue.flush();
        frame_data.main_command_buffer.reset();
        frame_data.command_pool.reset();
        frame_data.swapchain_semaphore.reset();
        frame_data.render_fence.reset();
    }
    m_vk.present_semaphores.clear();
    m_vk.swapchain_image_views.clear();
    m_vk.imm_command_buffer.reset();
    m_vk.imm_command_pool.reset();
    m_vk.imm_fence.reset();
    m_vk.triangle_pipeline.reset();
    m_vk.mesh_pipeline.reset();
    m_vk.default_sampler_linear.reset();
    m_vk.default_sampler_nearest.reset();

    destroy_swapchain();
    destroy_draw_image();
    destroy_msaa_color_image();
    destroy_depth_image();

    m_vk.deletion_queue.flush();

    if (m_vk.allocator) {
        vmaDestroyAllocator(m_vk.allocator);
        m_vk.allocator = nullptr;
    }

    if (m_vk.surface) {
        SDL_Vulkan_DestroySurface(
            m_vkb.instance, static_cast<VkSurfaceKHR>(m_vk.surface), nullptr);
        m_vk.surface = nullptr;
    }

    vkb::destroy_device(m_vkb.dev);
    vkb::destroy_instance(m_vkb.instance);
}

auto VulkanRenderer::resize(uint32_t width, uint32_t height) -> void
{
    recreate_swapchain(width, height);
}

auto VulkanRenderer::set_antialiasing(AntiAliasingKind kind) -> void
{
    enqueue_render_command(RenderCommand {
        RenderCommand::SetAntiAliasing { kind },
    });
}

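// Clamps the requested MSAA level to what the device supports, then rebuilds
// the MSAA color / depth targets and the pipelines if a swapchain exists.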
auto VulkanRenderer::apply_antialiasing(AntiAliasingKind kind) -> void
{
    auto requested_samples = [&](AntiAliasingKind aa) {
        switch (aa) {
        case AntiAliasingKind::NONE:
            return vk::SampleCountFlagBits::e1;
        case AntiAliasingKind::MSAA_2X:
            return vk::SampleCountFlagBits::e2;
        case AntiAliasingKind::MSAA_4X:
            return vk::SampleCountFlagBits::e4;
        case AntiAliasingKind::MSAA_8X:
            return vk::SampleCountFlagBits::e8;
        }
        return vk::SampleCountFlagBits::e1;
    }(kind);

    auto best_supported = [&](vk::SampleCountFlagBits requested) {
        auto const supported = m_vk.supported_framebuffer_samples;

        auto pick_if_supported = [&](vk::SampleCountFlagBits candidate) {
            return (supported & candidate) == candidate;
        };

        if (requested >= vk::SampleCountFlagBits::e64
            && pick_if_supported(vk::SampleCountFlagBits::e64)) {
            return vk::SampleCountFlagBits::e64;
        }
        if (requested >= vk::SampleCountFlagBits::e32
            && pick_if_supported(vk::SampleCountFlagBits::e32)) {
            return vk::SampleCountFlagBits::e32;
        }
        if (requested >= vk::SampleCountFlagBits::e16
            && pick_if_supported(vk::SampleCountFlagBits::e16)) {
            return vk::SampleCountFlagBits::e16;
        }
        if (requested >= vk::SampleCountFlagBits::e8
            && pick_if_supported(vk::SampleCountFlagBits::e8)) {
            return vk::SampleCountFlagBits::e8;
        }
        if (requested >= vk::SampleCountFlagBits::e4
            && pick_if_supported(vk::SampleCountFlagBits::e4)) {
            return vk::SampleCountFlagBits::e4;
        }
        if (requested >= vk::SampleCountFlagBits::e2
            && pick_if_supported(vk::SampleCountFlagBits::e2)) {
            return vk::SampleCountFlagBits::e2;
        }
        return vk::SampleCountFlagBits::e1;
    }(requested_samples);

    auto kind_for_samples = [](vk::SampleCountFlagBits samples) {
        switch (samples) {
        case vk::SampleCountFlagBits::e2:
            return AntiAliasingKind::MSAA_2X;
        case vk::SampleCountFlagBits::e4:
            return AntiAliasingKind::MSAA_4X;
        case vk::SampleCountFlagBits::e8:
            return AntiAliasingKind::MSAA_8X;
        default:
            return AntiAliasingKind::NONE;
        }
    };

    auto const effective_kind = kind_for_samples(best_supported);
    if (m_vk.antialiasing_kind == effective_kind
        && m_vk.msaa_samples == best_supported) {
        return;
    }

    if (best_supported != requested_samples) {
        m_logger.warn("Requested antialiasing {} but using {}",
            static_cast<int>(kind), static_cast<int>(effective_kind));
    }

    m_vk.antialiasing_kind = effective_kind;
    m_vk.msaa_samples = best_supported;

    if (!m_vk.swapchain || m_vk.swapchain_extent.width == 0
        || m_vk.swapchain_extent.height == 0) {
        return;
    }

    m_device.waitIdle();

    create_msaa_color_image(
        m_vk.swapchain_extent.width, m_vk.swapchain_extent.height);
    create_depth_image(
        m_vk.swapchain_extent.width, m_vk.swapchain_extent.height);
    pipelines_init();
}

auto VulkanRenderer::enqueue_render_command(RenderCommand &&command) -> void
{
    std::scoped_lock lock { m_command_mutex };
    m_pending_render_commands.emplace_back(std::move(command));
}

auto VulkanRenderer::process_render_commands() -> void
{
    std::vector<RenderCommand> commands;
    {
        std::scoped_lock lock { m_command_mutex };
        commands.swap(m_pending_render_commands);
    }

    for (auto &command : commands) {
        std::visit(
            [&](auto &&payload) {
                using Payload = std::decay_t<decltype(payload)>;
                if constexpr (std::is_same_v<Payload,
                                  RenderCommand::SetAntiAliasing>) {
                    apply_antialiasing(payload.kind);
                }
            },
            command.payload);
    }
}

auto VulkanRenderer::immediate_submit(
    std::function<void(vk::CommandBuffer cmd)> &&function,
    bool flush_frame_deletion_queue, bool clear_frame_descriptors) -> void
{
    m_device.resetFences(m_vk.imm_fence.get());
    m_vk.imm_command_buffer.get().reset();

    auto cmd { m_vk.imm_command_buffer.get() };
    vk::CommandBufferBeginInfo cmd_begin_info {};
    cmd_begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
    cmd.begin(cmd_begin_info);

    function(cmd);

    cmd.end();

    auto cmd_info { vkinit::command_buffer_submit_info(cmd) };
    auto submit { vkinit::submit_info2(&cmd_info, nullptr, nullptr) };
    m_vk.graphics_queue.submit2(submit, m_vk.imm_fence.get());

    VK_CHECK(m_logger,
        m_device.waitForFences(m_vk.imm_fence.get(), true, 9'999'999'999));

    if (flush_frame_deletion_queue) {
        m_vk.get_current_frame().deletion_queue.flush();
    }
    if (clear_frame_descriptors) {
        m_vk.get_current_frame().frame_descriptors.clear_pools(
            m_vkb.dev.device);
    }
}

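// Creates the instance (with a debug callback that forwards validation
// messages to the engine logger), the SDL surface, the physical and logical
// devices via vk-bootstrap, and the VMA allocator.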
auto VulkanRenderer::vk_init() -> void
{
    VULKAN_HPP_DEFAULT_DISPATCHER.init(vkGetInstanceProcAddr);

    vkb::InstanceBuilder instance_builder {};
    instance_builder
        .enable_extension(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME)
        .set_app_name("Lunar")
        .set_engine_name("Lunar")
        .require_api_version(1, 3, 0)
        .set_debug_callback_user_data_pointer(this)
        .set_debug_callback(
            [](VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
                VkDebugUtilsMessageTypeFlagsEXT message_type,
                VkDebugUtilsMessengerCallbackDataEXT const *callback_data,
                void *user_data) {
                auto renderer { reinterpret_cast<VulkanRenderer *>(user_data) };

                auto level = Logger::Level::Debug;
                if (message_severity
                    & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
                    level = Logger::Level::Error;
                } else if (message_severity
                    & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
                    level = Logger::Level::Warning;
                } else if (message_severity
                    & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
                    level = Logger::Level::Info;
                }

                renderer->m_logger.log(level,
                    std::format("[Vulkan] [{}] {}",
                        vkb::to_string_message_type(message_type),
                        callback_data->pMessage));

                return VK_FALSE;
            });
#ifndef NDEBUG
    instance_builder.request_validation_layers();
#endif
    auto const instance_builder_ret { instance_builder.build() };
    if (!instance_builder_ret) {
        std::println(std::cerr, "Failed to create Vulkan instance. Error: {}",
            instance_builder_ret.error().message());
        throw std::runtime_error("App init fail");
    }

    m_vkb.instance = instance_builder_ret.value();
    m_instance = vk::Instance { m_vkb.instance.instance };
    VULKAN_HPP_DEFAULT_DISPATCHER.init(m_instance);

    VkSurfaceKHR raw_surface {};
    if (!SDL_Vulkan_CreateSurface(
            m_window, m_vkb.instance, nullptr, &raw_surface)) {
        m_logger.err("Failed to create Vulkan surface");
        throw std::runtime_error("App init fail");
    }
    m_vk.surface = vk::SurfaceKHR { raw_surface };

    vkb::PhysicalDeviceSelector phys_device_selector { m_vkb.instance };
    VkPhysicalDeviceVulkan13Features features_13 {};
    features_13.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES;
    features_13.pNext = nullptr;
    features_13.synchronization2 = VK_TRUE;
    features_13.dynamicRendering = VK_TRUE;
    VkPhysicalDeviceBufferDeviceAddressFeatures
        buffer_device_address_features {};
    buffer_device_address_features.sType
        = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES;
    buffer_device_address_features.bufferDeviceAddress = VK_TRUE;
    phys_device_selector.set_surface(m_vk.surface)
        .add_desired_extensions({
            VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
            VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
            VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME,
            VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
            VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
            VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
            VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
            VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
            VK_KHR_MAINTENANCE1_EXTENSION_NAME,
            VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
            VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME,
            VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME,
        })
        .set_required_features_13(features_13)
        .add_required_extension_features(buffer_device_address_features);
    auto physical_device_selector_return { phys_device_selector.select() };
    if (!physical_device_selector_return) {
        std::println(std::cerr,
            "Failed to find Vulkan physical device. Error: {}",
            physical_device_selector_return.error().message());
        throw std::runtime_error("App init fail");
    }
    m_vkb.phys_dev = physical_device_selector_return.value();
    m_physical_device = vk::PhysicalDevice { m_vkb.phys_dev.physical_device };

    m_logger.info("Chosen Vulkan physical device: {}",
        m_vkb.phys_dev.properties.deviceName);

    auto const props = m_physical_device.getProperties();
    m_vk.supported_framebuffer_samples
        = props.limits.framebufferColorSampleCounts
        & props.limits.framebufferDepthSampleCounts;
    m_vk.msaa_samples = vk::SampleCountFlagBits::e1;
    m_vk.antialiasing_kind = AntiAliasingKind::NONE;

    vkb::DeviceBuilder device_builder { m_vkb.phys_dev };
    auto dev_ret { device_builder.build() };
    if (!dev_ret) {
        std::println(std::cerr, "Failed to create Vulkan device. Error: {}",
            dev_ret.error().message());
        throw std::runtime_error("App init fail");
    }
    m_vkb.dev = dev_ret.value();
    m_device = vk::Device { m_vkb.dev.device };
    VULKAN_HPP_DEFAULT_DISPATCHER.init(m_device);

    auto queue_family_ret { m_vkb.dev.get_queue_index(
        vkb::QueueType::graphics) };
    if (!queue_family_ret) {
        std::println(std::cerr, "Failed to get graphics queue. Error: {}",
            queue_family_ret.error().message());
        throw std::runtime_error("App init fail");
    }
    m_vk.graphics_queue_family = queue_family_ret.value();
    m_vk.graphics_queue = m_device.getQueue(m_vk.graphics_queue_family, 0);

    VmaAllocatorCreateInfo allocator_ci {};
    allocator_ci.physicalDevice = m_vkb.phys_dev.physical_device;
    allocator_ci.device = m_vkb.dev.device;
    allocator_ci.instance = m_vkb.instance.instance;
    allocator_ci.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
    vmaCreateAllocator(&allocator_ci, &m_vk.allocator);
}

auto VulkanRenderer::swapchain_init() -> void
{
    int w, h;
    SDL_GetWindowSize(m_window, &w, &h);
    create_swapchain(static_cast<uint32_t>(w), static_cast<uint32_t>(h));
    create_draw_image(static_cast<uint32_t>(w), static_cast<uint32_t>(h));
    create_msaa_color_image(static_cast<uint32_t>(w), static_cast<uint32_t>(h));
    create_depth_image(static_cast<uint32_t>(w), static_cast<uint32_t>(h));
}

auto VulkanRenderer::commands_init() -> void
{
    vk::CommandPoolCreateInfo ci {};
    ci.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
    ci.queueFamilyIndex = m_vk.graphics_queue_family;
    for (auto &frame_data : m_vk.frames) {
        frame_data.command_pool = m_device.createCommandPoolUnique(ci);

        vk::CommandBufferAllocateInfo ai {};
        ai.commandPool = frame_data.command_pool.get();
        ai.level = vk::CommandBufferLevel::ePrimary;
        ai.commandBufferCount = 1;

        frame_data.main_command_buffer
            = std::move(m_device.allocateCommandBuffersUnique(ai).front());
    }

    m_vk.imm_command_pool = m_device.createCommandPoolUnique(ci);

    vk::CommandBufferAllocateInfo ai {};
    ai.commandPool = m_vk.imm_command_pool.get();
    ai.level = vk::CommandBufferLevel::ePrimary;
    ai.commandBufferCount = 1;
    m_vk.imm_command_buffer
        = std::move(m_device.allocateCommandBuffersUnique(ai).front());
}

auto VulkanRenderer::sync_init() -> void
{
    vk::FenceCreateInfo fence_ci {};
    fence_ci.flags = vk::FenceCreateFlagBits::eSignaled;
    vk::SemaphoreCreateInfo semaphore_ci {};

    for (auto &frame_data : m_vk.frames) {
        frame_data.render_fence = m_device.createFenceUnique(fence_ci);
        frame_data.swapchain_semaphore
            = m_device.createSemaphoreUnique(semaphore_ci);
    }

    m_vk.imm_fence = m_device.createFenceUnique(fence_ci);
}

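// Creates per-frame growable descriptor allocators and the scene-data and
// single-image descriptor set layouts.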
auto VulkanRenderer::descriptors_init() -> void
{
    m_vk.deletion_queue.emplace([&]() {
        m_device.destroyDescriptorSetLayout(
            m_vk.gpu_scene_data_descriptor_layout);
        m_device.destroyDescriptorSetLayout(
            m_vk.single_image_descriptor_layout);
    });

    for (unsigned int i = 0; i < FRAME_OVERLAP; i++) {
        std::vector<DescriptorAllocatorGrowable::PoolSizeRatio> frame_sizes = {
            { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 3 },
            { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3 },
            { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 3 },
            { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 4 },
        };

        m_vk.frames[i].frame_descriptors = DescriptorAllocatorGrowable {};
        m_vk.frames[i].frame_descriptors.init(
            m_vkb.dev.device, 1000, frame_sizes);

        m_vk.deletion_queue.emplace([&, i]() {
            m_vk.frames[i].frame_descriptors.destroy_pools(m_vkb.dev.device);
        });
    }

    auto scene_layout_raw
        = DescriptorLayoutBuilder()
              .add_binding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
              .build(m_logger, m_vkb.dev.device,
                  VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
    m_vk.gpu_scene_data_descriptor_layout
        = vk::DescriptorSetLayout { scene_layout_raw };

    auto single_layout_raw
        = DescriptorLayoutBuilder()
              .add_binding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
              .build(m_logger, m_vkb.dev.device, VK_SHADER_STAGE_FRAGMENT_BIT);
    m_vk.single_image_descriptor_layout
        = vk::DescriptorSetLayout { single_layout_raw };
}

auto VulkanRenderer::pipelines_init() -> void
{
    triangle_pipeline_init();
    mesh_pipeline_init();
}

auto VulkanRenderer::triangle_pipeline_init() -> void
{
    Pipeline::Builder builder { m_device, m_logger };

    uint8_t triangle_vert_shader_data[] {
#embed "triangle_vert.spv"
    };
    auto triangle_vert_shader = vkutil::load_shader_module(
        std::span<uint8_t>(
            triangle_vert_shader_data, sizeof(triangle_vert_shader_data)),
        m_device);
    if (!triangle_vert_shader) {
        m_logger.err("Failed to load triangle vert shader");
    }

    uint8_t triangle_frag_shader_data[] {
#embed "triangle_frag.spv"
    };
    auto triangle_frag_shader = vkutil::load_shader_module(
        std::span<uint8_t>(
            triangle_frag_shader_data, sizeof(triangle_frag_shader_data)),
        m_device);
    if (!triangle_frag_shader) {
        m_logger.err("Failed to load triangle frag shader");
    }

    m_vk.triangle_pipeline
        = builder.build_graphics([&](GraphicsPipelineBuilder &pipeline_builder)
              -> GraphicsPipelineBuilder & {
            return pipeline_builder
                .set_shaders(
                    triangle_vert_shader.get(), triangle_frag_shader.get())
                .set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
                .set_polygon_mode(VK_POLYGON_MODE_FILL)
                .set_multisampling(
                    static_cast<VkSampleCountFlagBits>(m_vk.msaa_samples))
                .enable_blending_additive()
                .disable_depth_testing()
                .set_color_attachment_format(
                    static_cast<VkFormat>(m_vk.draw_image.format))
                .set_depth_format(
                    static_cast<VkFormat>(m_vk.depth_image.format));
        });
}

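// Builds the textured mesh pipeline with its push-constant range and the
// single-image descriptor layout; the SPIR-V blobs are embedded at compile
// time via #embed.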
auto VulkanRenderer::mesh_pipeline_init() -> void
{
    Pipeline::Builder builder { m_device, m_logger };

    uint8_t triangle_vert_shader_data[] {
#embed "triangle_mesh_vert.spv"
    };
    auto triangle_vert_shader = vkutil::load_shader_module(
        std::span<uint8_t>(
            triangle_vert_shader_data, sizeof(triangle_vert_shader_data)),
        m_device);
    if (!triangle_vert_shader) {
        m_logger.err("Failed to load triangle vert shader");
    }

    uint8_t triangle_frag_shader_data[] {
#embed "tex_image_frag.spv"
    };
    auto triangle_frag_shader = vkutil::load_shader_module(
        std::span<uint8_t>(
            triangle_frag_shader_data, sizeof(triangle_frag_shader_data)),
        m_device);
    if (!triangle_frag_shader) {
        m_logger.err("Failed to load triangle frag shader");
    }

    vk::PushConstantRange push_constant_range {};
    push_constant_range.stageFlags = vk::ShaderStageFlagBits::eVertex;
    push_constant_range.offset = 0;
    push_constant_range.size = sizeof(GPUDrawPushConstants);

    std::array push_constant_ranges { push_constant_range };
    builder.set_push_constant_ranges(push_constant_ranges);
    std::array descriptor_set_layouts { m_vk.single_image_descriptor_layout };
    builder.set_descriptor_set_layouts(descriptor_set_layouts);

    m_vk.mesh_pipeline
        = builder.build_graphics([&](GraphicsPipelineBuilder &pipeline_builder)
              -> GraphicsPipelineBuilder & {
            return pipeline_builder
                .set_shaders(
                    triangle_vert_shader.get(), triangle_frag_shader.get())
                .set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
                .set_polygon_mode(VK_POLYGON_MODE_FILL)
                .set_cull_mode(VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE)
                .set_multisampling(
                    static_cast<VkSampleCountFlagBits>(m_vk.msaa_samples))
                .disable_blending()
                .enable_depth_testing()
                .set_color_attachment_format(
                    static_cast<VkFormat>(m_vk.draw_image.format))
                .set_depth_format(
                    static_cast<VkFormat>(m_vk.depth_image.format));
        });
}

auto VulkanRenderer::imgui_init() -> void
{
    VkDescriptorPoolSize pool_sizes[] = {
        { VK_DESCRIPTOR_TYPE_SAMPLER, 1000 },
        { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1000 },
        { VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1000 },
        { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1000 },
        { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1000 },
        { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1000 },
        { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000 },
        { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1000 },
        { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1000 },
        { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1000 },
        { VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1000 },
    };

    VkDescriptorPoolCreateInfo pool_info = {};
    pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
    pool_info.maxSets = 1000;
    pool_info.poolSizeCount = (uint32_t)std::size(pool_sizes);
    pool_info.pPoolSizes = pool_sizes;

    m_vk.imgui_descriptor_pool = m_device.createDescriptorPoolUnique(pool_info);

    ImGui::CreateContext();

    ImGui_ImplSDL3_InitForVulkan(m_window);

    ImGui_ImplVulkan_InitInfo init_info = {};
    init_info.Instance = m_vkb.instance;
    init_info.PhysicalDevice = m_vkb.phys_dev.physical_device;
    init_info.Device = m_vkb.dev.device;
    init_info.Queue = static_cast<VkQueue>(m_vk.graphics_queue);
    init_info.DescriptorPool = m_vk.imgui_descriptor_pool.get();
    init_info.MinImageCount = 3;
    init_info.ImageCount = 3;
    init_info.UseDynamicRendering = true;

    init_info.PipelineInfoMain.PipelineRenderingCreateInfo.sType
        = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO;
    init_info.PipelineInfoMain.PipelineRenderingCreateInfo.colorAttachmentCount
        = 1;
    auto swapchain_format = static_cast<VkFormat>(m_vk.swapchain_image_format);
    init_info.PipelineInfoMain.PipelineRenderingCreateInfo
        .pColorAttachmentFormats
        = &swapchain_format;

    init_info.PipelineInfoMain.MSAASamples = VK_SAMPLE_COUNT_1_BIT;

    ImGui_ImplVulkan_Init(&init_info);

    m_vk.deletion_queue.emplace([this]() {
        ImGui_ImplVulkan_Shutdown();
        ImGui_ImplSDL3_Shutdown();
        ImGui::DestroyContext();
        m_vk.imgui_descriptor_pool.reset();
    });
}

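// Creates the default rectangle mesh, the glTF test meshes, 1x1 solid-color
// images, the magenta/black error checkerboard and the default samplers.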
auto VulkanRenderer::default_data_init() -> void
{
    std::array<Vertex, 4> rect_vertices;

    rect_vertices[0].position = { 0.5, -0.5, 0 };
    rect_vertices[1].position = { 0.5, 0.5, 0 };
    rect_vertices[2].position = { -0.5, -0.5, 0 };
    rect_vertices[3].position = { -0.5, 0.5, 0 };

    rect_vertices[0].u = 1.0f;
    rect_vertices[0].v = 1.0f;
    rect_vertices[1].u = 1.0f;
    rect_vertices[1].v = 0.0f;
    rect_vertices[2].u = 0.0f;
    rect_vertices[2].v = 1.0f;
    rect_vertices[3].u = 0.0f;
    rect_vertices[3].v = 0.0f;

    for (auto &v : rect_vertices) {
        v.normal = { 0.0f, 0.0f, 1.0f };
    }

    rect_vertices[0].color = { 0, 0, 0, 1 };
    rect_vertices[1].color = { 0.5, 0.5, 0.5, 1 };
    rect_vertices[2].color = { 1, 0, 0, 1 };
    rect_vertices[3].color = { 0, 1, 0, 1 };

    std::array<uint32_t, 6> rect_indices;

    rect_indices[0] = 0;
    rect_indices[1] = 1;
    rect_indices[2] = 2;

    rect_indices[3] = 2;
    rect_indices[4] = 1;
    rect_indices[5] = 3;

    m_vk.rectangle = upload_mesh(rect_indices, rect_vertices);

    m_vk.test_meshes
        = Mesh::load_gltf_meshes(*this, "assets/basicmesh.glb").value();

    m_vk.deletion_queue.emplace([&]() {
        for (auto &mesh : m_vk.test_meshes) {
            destroy_buffer(mesh->mesh_buffers.index_buffer);
            destroy_buffer(mesh->mesh_buffers.vertex_buffer);
        }

        destroy_buffer(m_vk.rectangle.index_buffer);
        destroy_buffer(m_vk.rectangle.vertex_buffer);
    });

    {
        // Solid color images
        auto const white = smath::pack_unorm4x8(smath::Vec4 { 1, 1, 1, 1 });
        m_vk.white_image = create_image(&white, vk::Extent3D { 1, 1, 1 },
            vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);

        auto const black = smath::pack_unorm4x8(smath::Vec4 { 0, 0, 0, 1 });
        m_vk.black_image = create_image(&black, vk::Extent3D { 1, 1, 1 },
            vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);

        auto const gray
            = smath::pack_unorm4x8(smath::Vec4 { 0.6f, 0.6f, 0.6f, 1 });
        m_vk.gray_image = create_image(&gray, vk::Extent3D { 1, 1, 1 },
            vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);

        // Error checkerboard image
        auto const magenta = smath::pack_unorm4x8(smath::Vec4 { 1, 0, 1, 1 });
        std::array<uint32_t, 16 * 16> checkerboard;
        for (int x = 0; x < 16; x++) {
            for (int y = 0; y < 16; y++) {
                checkerboard[y * 16 + x]
                    = ((x % 2) ^ (y % 2)) ? magenta : black;
            }
        }
        m_vk.error_image
            = create_image(checkerboard.data(), vk::Extent3D { 16, 16, 1 },
                vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);
    }

    vk::SamplerCreateInfo sampler_ci {};

    sampler_ci.magFilter = vk::Filter::eNearest;
    sampler_ci.minFilter = vk::Filter::eNearest;
    m_vk.default_sampler_nearest = m_device.createSamplerUnique(sampler_ci);

    sampler_ci.magFilter = vk::Filter::eLinear;
    sampler_ci.minFilter = vk::Filter::eLinear;
    m_vk.default_sampler_linear = m_device.createSamplerUnique(sampler_ci);

    m_vk.deletion_queue.emplace([&]() {
        m_vk.default_sampler_linear.reset();
        m_vk.default_sampler_nearest.reset();
        destroy_image(m_vk.error_image);
        destroy_image(m_vk.gray_image);
        destroy_image(m_vk.black_image);
        destroy_image(m_vk.white_image);
    });
}

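// Frame entry point: waits for the frame fence, acquires a swapchain image,
// records user drawing through the GL recorder into the draw image, copies it
// to the swapchain image, draws ImGui on top and presents. Out-of-date or
// suboptimal results trigger a swapchain recreation.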
auto VulkanRenderer::render(std::function<void(GL &)> const &record) -> void
{
    defer(m_vk.frame_number++);

    if (!m_vk.swapchain || m_vk.swapchain_extent.width == 0
        || m_vk.swapchain_extent.height == 0) {
        return;
    }

    process_render_commands();

    auto &frame = m_vk.get_current_frame();
    VK_CHECK(m_logger,
        m_device.waitForFences(frame.render_fence.get(), true, 1'000'000'000));
    frame.deletion_queue.flush();
    frame.frame_descriptors.clear_pools(m_vkb.dev.device);

    auto raw_fence = static_cast<VkFence>(frame.render_fence.get());
    VK_CHECK(m_logger, vkResetFences(m_vkb.dev.device, 1, &raw_fence));

    auto const acquire_result = m_device.acquireNextImageKHR(
        m_vk.swapchain, 1'000'000'000, frame.swapchain_semaphore.get(), {});
    if (acquire_result.result == vk::Result::eErrorOutOfDateKHR
        || acquire_result.result == vk::Result::eSuboptimalKHR) {
        int width {}, height {};
        SDL_GetWindowSize(m_window, &width, &height);
        recreate_swapchain(
            static_cast<uint32_t>(width), static_cast<uint32_t>(height));
        return;
    }
    VK_CHECK(m_logger, acquire_result.result);
    uint32_t const swapchain_image_idx { acquire_result.value };

    auto cmd { frame.main_command_buffer.get() };
    cmd.reset();

    m_vk.draw_extent.width = m_vk.draw_image.extent.width;
    m_vk.draw_extent.height = m_vk.draw_image.extent.height;

    vk::CommandBufferBeginInfo cmd_begin_info {};
    cmd_begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
    VK_CHECK(m_logger,
        vkBeginCommandBuffer(static_cast<VkCommandBuffer>(cmd),
            reinterpret_cast<VkCommandBufferBeginInfo *>(&cmd_begin_info)));

    bool const msaa_enabled = m_vk.msaa_samples != vk::SampleCountFlagBits::e1;

    vkutil::transition_image(cmd, m_vk.draw_image.image, m_vk.draw_image_layout,
        vk::ImageLayout::eColorAttachmentOptimal);
    m_vk.draw_image_layout = vk::ImageLayout::eColorAttachmentOptimal;

    if (msaa_enabled) {
        vkutil::transition_image(cmd, m_vk.msaa_color_image.image,
            m_vk.msaa_color_image_layout,
            vk::ImageLayout::eColorAttachmentOptimal);
        m_vk.msaa_color_image_layout = vk::ImageLayout::eColorAttachmentOptimal;
    }

    vkutil::transition_image(cmd, m_vk.depth_image.image,
        m_vk.depth_image_layout, vk::ImageLayout::eDepthAttachmentOptimal);
    m_vk.depth_image_layout = vk::ImageLayout::eDepthAttachmentOptimal;

    gl.begin_drawing(cmd, m_vk.draw_image, &m_vk.depth_image);
    if (record) {
        record(gl);
    }
    gl.end_drawing();

    vkutil::transition_image(cmd, m_vk.draw_image.image, m_vk.draw_image_layout,
        vk::ImageLayout::eTransferSrcOptimal);
    m_vk.draw_image_layout = vk::ImageLayout::eTransferSrcOptimal;

    vkutil::transition_image(cmd, m_vk.swapchain_images.at(swapchain_image_idx),
        vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);

    vkutil::copy_image_to_image(cmd, m_vk.draw_image.image,
        m_vk.swapchain_images.at(swapchain_image_idx), m_vk.draw_extent,
        m_vk.swapchain_extent);

    vkutil::transition_image(cmd, m_vk.swapchain_images[swapchain_image_idx],
        vk::ImageLayout::eTransferDstOptimal,
        vk::ImageLayout::eColorAttachmentOptimal);

    draw_imgui(cmd, m_vk.swapchain_image_views.at(swapchain_image_idx).get());

    vkutil::transition_image(cmd, m_vk.swapchain_images[swapchain_image_idx],
        vk::ImageLayout::eColorAttachmentOptimal,
        vk::ImageLayout::ePresentSrcKHR);

    cmd.end();

    auto render_semaphore
        = m_vk.present_semaphores.at(swapchain_image_idx).get();
    vk::PipelineStageFlags2 wait_stage
        = vk::PipelineStageFlagBits2::eColorAttachmentOutput;
    auto wait_info { vkinit::semaphore_submit_info(
        wait_stage, m_vk.get_current_frame().swapchain_semaphore.get()) };
    auto command_buffer_info { vkinit::command_buffer_submit_info(cmd) };
    auto signal_info { vkinit::semaphore_submit_info(
        vk::PipelineStageFlagBits2::eAllCommands, render_semaphore) };
    auto submit_info { vkinit::submit_info2(
        &command_buffer_info, &wait_info, &signal_info) };

    m_vk.graphics_queue.submit2(
        submit_info, m_vk.get_current_frame().render_fence.get());

    vk::PresentInfoKHR present_info {};
    present_info.setSwapchains(m_vk.swapchain);
    present_info.setWaitSemaphores(render_semaphore);
    present_info.setImageIndices(swapchain_image_idx);

    auto const present_result = m_vk.graphics_queue.presentKHR(present_info);
    if (present_result == vk::Result::eErrorOutOfDateKHR
        || present_result == vk::Result::eSuboptimalKHR) {
        int width {}, height {};
        SDL_GetWindowSize(m_window, &width, &height);
        recreate_swapchain(
            static_cast<uint32_t>(width), static_cast<uint32_t>(height));
        return;
    }
    VK_CHECK(m_logger, present_result);
}

auto VulkanRenderer::draw_imgui(
    vk::CommandBuffer cmd, vk::ImageView target_image_view) -> void
{
    auto const color_attachment { vkinit::attachment_info(
        target_image_view, nullptr, vk::ImageLayout::eColorAttachmentOptimal) };
    auto const render_info { vkinit::render_info(
        m_vk.draw_extent, &color_attachment, nullptr) };

    cmd.beginRendering(render_info);

    ImGui_ImplVulkan_RenderDrawData(
        ImGui::GetDrawData(), static_cast<VkCommandBuffer>(cmd));

    cmd.endRendering();
}

auto VulkanRenderer::create_swapchain(uint32_t width, uint32_t height) -> void
{
    vkb::SwapchainBuilder builder { m_vkb.phys_dev, m_vkb.dev, m_vk.surface };
    m_vk.swapchain_image_format = vk::Format::eB8G8R8A8Unorm;
    auto const swapchain_ret { builder
            .set_desired_format({
                .format = static_cast<VkFormat>(m_vk.swapchain_image_format),
                .colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
            })
            .set_desired_present_mode(VK_PRESENT_MODE_FIFO_KHR)
            .set_desired_extent(width, height)
            .add_image_usage_flags(VK_IMAGE_USAGE_TRANSFER_DST_BIT)
            .build() };
    if (!swapchain_ret) {
        std::println(std::cerr, "Failed to create swapchain. Error: {}",
            swapchain_ret.error().message());
        throw std::runtime_error("App init fail");
    }
    m_vkb.swapchain = swapchain_ret.value();

    m_vk.swapchain = m_vkb.swapchain.swapchain;
    m_vk.swapchain_extent = vk::Extent2D { m_vkb.swapchain.extent.width,
        m_vkb.swapchain.extent.height };
    auto images = m_vkb.swapchain.get_images().value();
    m_vk.swapchain_images.assign(images.begin(), images.end());

    m_vk.swapchain_image_views.clear();
    for (auto img : m_vk.swapchain_images) {
        vk::ImageViewCreateInfo iv_ci {};
        iv_ci.image = img;
        iv_ci.viewType = vk::ImageViewType::e2D;
        iv_ci.format = m_vk.swapchain_image_format;
        iv_ci.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
        iv_ci.subresourceRange.levelCount = 1;
        iv_ci.subresourceRange.layerCount = 1;
        m_vk.swapchain_image_views.emplace_back(
            m_device.createImageViewUnique(iv_ci));
    }

    vk::SemaphoreCreateInfo semaphore_ci {};
    m_vk.present_semaphores.resize(m_vk.swapchain_images.size());
    for (auto &semaphore : m_vk.present_semaphores) {
        semaphore = m_device.createSemaphoreUnique(semaphore_ci);
    }
}

auto VulkanRenderer::create_draw_image(uint32_t width, uint32_t height) -> void
{
    destroy_draw_image();

    auto const flags { vk::ImageUsageFlagBits::eTransferSrc
        | vk::ImageUsageFlagBits::eTransferDst
        | vk::ImageUsageFlagBits::eSampled
        | vk::ImageUsageFlagBits::eColorAttachment };
    m_vk.draw_image = create_image(
        { width, height, 1 }, vk::Format::eR16G16B16A16Sfloat, flags);
    m_vk.draw_image_layout = vk::ImageLayout::eUndefined;
}

auto VulkanRenderer::create_msaa_color_image(uint32_t width, uint32_t height)
    -> void
{
    destroy_msaa_color_image();

    if (m_vk.msaa_samples == vk::SampleCountFlagBits::e1) {
        return;
    }

    auto const flags { vk::ImageUsageFlagBits::eColorAttachment };
    m_vk.msaa_color_image = create_image(
        { width, height, 1 }, m_vk.draw_image.format, flags, m_vk.msaa_samples);
    m_vk.msaa_color_image_layout = vk::ImageLayout::eUndefined;
}

auto VulkanRenderer::create_depth_image(uint32_t width, uint32_t height) -> void
{
    destroy_depth_image();

    auto const flags { vk::ImageUsageFlagBits::eTransferSrc
        | vk::ImageUsageFlagBits::eTransferDst
        | vk::ImageUsageFlagBits::eDepthStencilAttachment };
    m_vk.depth_image = create_image(
        { width, height, 1 }, vk::Format::eD32Sfloat, flags, m_vk.msaa_samples);
    m_vk.depth_image_layout = vk::ImageLayout::eUndefined;
}

auto VulkanRenderer::destroy_depth_image() -> void
{
    if (m_vk.depth_image.image) {
        m_device.destroyImageView(m_vk.depth_image.image_view);
        m_vk.depth_image.image_view = vk::ImageView {};
        vmaDestroyImage(m_vk.allocator,
            static_cast<VkImage>(m_vk.depth_image.image),
            m_vk.depth_image.allocation);
        m_vk.depth_image.image = vk::Image {};
        m_vk.depth_image.allocation = nullptr;
        m_vk.depth_image.extent = vk::Extent3D { 0, 0, 0 };
        m_vk.depth_image_layout = vk::ImageLayout::eUndefined;
    }
}

auto VulkanRenderer::destroy_draw_image() -> void
{
    if (m_vk.draw_image.image) {
        m_device.destroyImageView(m_vk.draw_image.image_view);
        m_vk.draw_image.image_view = vk::ImageView {};
        vmaDestroyImage(m_vk.allocator,
            static_cast<VkImage>(m_vk.draw_image.image),
            m_vk.draw_image.allocation);
        m_vk.draw_image.image = vk::Image {};
        m_vk.draw_image.allocation = nullptr;
        m_vk.draw_image.extent = vk::Extent3D { 0, 0, 0 };
        m_vk.draw_image_layout = vk::ImageLayout::eUndefined;
    }
}

auto VulkanRenderer::destroy_msaa_color_image() -> void
{
    if (m_vk.msaa_color_image.image) {
        m_device.destroyImageView(m_vk.msaa_color_image.image_view);
        m_vk.msaa_color_image.image_view = vk::ImageView {};
        vmaDestroyImage(m_vk.allocator,
            static_cast<VkImage>(m_vk.msaa_color_image.image),
            m_vk.msaa_color_image.allocation);
        m_vk.msaa_color_image.image = vk::Image {};
        m_vk.msaa_color_image.allocation = nullptr;
        m_vk.msaa_color_image.extent = vk::Extent3D { 0, 0, 0 };
        m_vk.msaa_color_image_layout = vk::ImageLayout::eUndefined;
    }
}

auto VulkanRenderer::recreate_swapchain(uint32_t width, uint32_t height) -> void
{
    m_device.waitIdle();

    if (width == 0 || height == 0) {
        destroy_swapchain();
        destroy_draw_image();
        destroy_msaa_color_image();
        destroy_depth_image();
        m_vk.swapchain_extent = vk::Extent2D { 0, 0 };
        return;
    }

    destroy_swapchain();
    destroy_draw_image();
    destroy_msaa_color_image();
    destroy_depth_image();

    create_swapchain(width, height);
    create_draw_image(width, height);
    create_msaa_color_image(width, height);
    create_depth_image(width, height);
}

auto VulkanRenderer::destroy_swapchain() -> void
{
    if (!m_vk.swapchain)
        return;

    m_vk.present_semaphores.clear();
    m_device.destroySwapchainKHR(m_vk.swapchain);

    m_vk.swapchain = vk::SwapchainKHR {};
    m_vk.swapchain_image_views.clear();
    m_vk.swapchain_images.clear();
    m_vk.present_semaphores.clear();
    m_vk.swapchain_extent = vk::Extent2D { 0, 0 };
}

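// Allocates a GPU-only image and its image view; optionally sized with a full
// mip chain.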
auto VulkanRenderer::create_image(vk::Extent3D size, vk::Format format,
    vk::ImageUsageFlags flags, vk::SampleCountFlagBits samples, bool mipmapped)
    -> AllocatedImage
{
    AllocatedImage new_image;
    new_image.format = format;
    new_image.extent = size;

    auto img_ci { vkinit::image_create_info(format, flags, size, samples) };
    if (mipmapped) {
        img_ci.mipLevels = static_cast<uint32_t>(std::floor(
                               std::log2(std::max(size.width, size.height))))
            + 1;
    }

    VmaAllocationCreateInfo alloc_ci {};
    alloc_ci.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    alloc_ci.requiredFlags
        = VkMemoryPropertyFlags(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

    VK_CHECK(m_logger,
        vmaCreateImage(m_vk.allocator,
            reinterpret_cast<VkImageCreateInfo const *>(&img_ci), &alloc_ci,
            reinterpret_cast<VkImage *>(&new_image.image),
            &new_image.allocation, nullptr));

    vk::ImageAspectFlags aspect_flag { vk::ImageAspectFlagBits::eColor };
    if (format == vk::Format::eD32Sfloat) {
        aspect_flag = vk::ImageAspectFlagBits::eDepth;
    }

    auto const view_ci { vkinit::imageview_create_info(
        format, new_image.image, aspect_flag) };
    new_image.image_view = m_device.createImageView(view_ci);

    return new_image;
}

auto VulkanRenderer::create_image(void const *data, vk::Extent3D size,
    vk::Format format, vk::ImageUsageFlags flags, bool mipmapped)
    -> AllocatedImage
{
    size_t data_size {
        static_cast<uint32_t>(size.depth) * static_cast<uint32_t>(size.width)
            * static_cast<uint32_t>(size.height) * 4,
    };
    auto const upload_buffer {
        create_buffer(data_size, vk::BufferUsageFlagBits::eTransferSrc,
            VMA_MEMORY_USAGE_CPU_TO_GPU),
    };

    VmaAllocationInfo info {};
    vmaGetAllocationInfo(m_vk.allocator, upload_buffer.allocation, &info);

    void *mapped_data { reinterpret_cast<GPUSceneData *>(info.pMappedData) };
    bool mapped_here { false };
    if (!mapped_data) {
        VkResult res = vmaMapMemory(
            m_vk.allocator, upload_buffer.allocation, (void **)&mapped_data);
        assert(res == VK_SUCCESS);
        mapped_here = true;
    }

    memcpy(mapped_data, data, data_size);

    auto const new_image {
        create_image(size, format,
            flags | vk::ImageUsageFlagBits::eTransferDst
                | vk::ImageUsageFlagBits::eTransferSrc,
            vk::SampleCountFlagBits::e1, mipmapped),
    };

    immediate_submit([&](vk::CommandBuffer cmd) {
        vkutil::transition_image(cmd, new_image.image,
            vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);

        vk::BufferImageCopy copy_region {};
        copy_region.imageSubresource.aspectMask
            = vk::ImageAspectFlagBits::eColor;
        copy_region.imageSubresource.mipLevel = 0;
        copy_region.imageSubresource.baseArrayLayer = 0;
        copy_region.imageSubresource.layerCount = 1;
        copy_region.imageExtent = size;

        cmd.copyBufferToImage(upload_buffer.buffer, new_image.image,
            vk::ImageLayout::eTransferDstOptimal, copy_region);

        vkutil::transition_image(cmd, new_image.image,
            vk::ImageLayout::eTransferDstOptimal,
            vk::ImageLayout::eShaderReadOnlyOptimal);
    });

    if (mapped_here) {
        vmaUnmapMemory(m_vk.allocator, upload_buffer.allocation);
    }
    destroy_buffer(upload_buffer);

    return new_image;
}

auto VulkanRenderer::destroy_image(AllocatedImage const &img) -> void
{
    if (img.image_view) {
        m_device.destroyImageView(img.image_view);
    }
    vmaDestroyImage(
        m_vk.allocator, static_cast<VkImage>(img.image), img.allocation);
}

auto VulkanRenderer::create_buffer(size_t alloc_size,
    vk::BufferUsageFlags usage, VmaMemoryUsage memory_usage) -> AllocatedBuffer
{
    vk::BufferCreateInfo buffer_ci {};
    buffer_ci.size = alloc_size;
    buffer_ci.usage = usage;
    buffer_ci.sharingMode = vk::SharingMode::eExclusive;

    VmaAllocationCreateInfo alloc_ci {};
    alloc_ci.usage = memory_usage;
    alloc_ci.flags = 0;
    if (memory_usage == VMA_MEMORY_USAGE_CPU_ONLY
        || memory_usage == VMA_MEMORY_USAGE_CPU_TO_GPU) {
        alloc_ci.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT
            | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
    }

    AllocatedBuffer buffer;
    VK_CHECK(m_logger,
        vmaCreateBuffer(m_vk.allocator,
            reinterpret_cast<VkBufferCreateInfo const *>(&buffer_ci), &alloc_ci,
            reinterpret_cast<VkBuffer *>(&buffer.buffer), &buffer.allocation,
            &buffer.info));

    return buffer;
}

auto VulkanRenderer::destroy_buffer(AllocatedBuffer const &buffer) -> void
{
    vmaDestroyBuffer(m_vk.allocator, buffer.buffer, buffer.allocation);
}

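// Uploads index/vertex data into GPU-only buffers via a staging copy and
// returns the buffers together with the vertex buffer's device address.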
auto VulkanRenderer::upload_mesh(
    std::span<uint32_t> indices, std::span<Vertex> vertices) -> GPUMeshBuffers
{
    auto const vertex_buffer_size { vertices.size() * sizeof(Vertex) };
    auto const index_buffer_size { indices.size() * sizeof(uint32_t) };

    GPUMeshBuffers new_surface;
    new_surface.vertex_buffer = create_buffer(vertex_buffer_size,
        vk::BufferUsageFlagBits::eVertexBuffer
            | vk::BufferUsageFlagBits::eTransferDst
            | vk::BufferUsageFlagBits::eShaderDeviceAddress,
        VMA_MEMORY_USAGE_GPU_ONLY);

    vk::BufferDeviceAddressInfo device_address_info {};
    device_address_info.buffer = new_surface.vertex_buffer.buffer;

    new_surface.vertex_buffer_address
        = m_device.getBufferAddress(device_address_info);

    new_surface.index_buffer = create_buffer(index_buffer_size,
        vk::BufferUsageFlagBits::eIndexBuffer
            | vk::BufferUsageFlagBits::eTransferDst
            | vk::BufferUsageFlagBits::eShaderDeviceAddress,
        VMA_MEMORY_USAGE_GPU_ONLY);

    auto staging { create_buffer(vertex_buffer_size + index_buffer_size,
        vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY) };

    VmaAllocationInfo info {};
    vmaGetAllocationInfo(m_vk.allocator, staging.allocation, &info);

    void *data = info.pMappedData;
    bool mapped_here { false };
    if (!data) {
        VkResult res = vmaMapMemory(m_vk.allocator, staging.allocation, &data);
        assert(res == VK_SUCCESS);
        mapped_here = true;
    }

    memcpy(data, vertices.data(), vertex_buffer_size);
    memcpy(reinterpret_cast<void *>(
               reinterpret_cast<size_t>(data) + vertex_buffer_size),
        indices.data(), index_buffer_size);

    immediate_submit([&](vk::CommandBuffer cmd) {
        vk::BufferCopy vertex_copy {};
        vertex_copy.dstOffset = 0;
        vertex_copy.srcOffset = 0;
        vertex_copy.size = vertex_buffer_size;

        cmd.copyBuffer(
            staging.buffer, new_surface.vertex_buffer.buffer, vertex_copy);

        vk::BufferCopy index_copy {};
        index_copy.dstOffset = 0;
        index_copy.srcOffset = vertex_buffer_size;
        index_copy.size = index_buffer_size;

        cmd.copyBuffer(
            staging.buffer, new_surface.index_buffer.buffer, index_copy);
    });

    if (mapped_here) {
        vmaUnmapMemory(m_vk.allocator, staging.allocation);
    }
    destroy_buffer(staging);

    return new_surface;
}

} // namespace Lunar