HikoGUI — a low-latency retained-mode GUI library.
Source listing: gfx_device_vulkan.hpp
// Copyright Take Vos 2019-2020.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)

#pragma once

// Project headers.
#include "gfx_device.hpp"
#include "gfx_system_globals.hpp"
#include "gfx_queue_vulkan.hpp"
#include "pipeline_flat_device_shared.hpp"
#include "pipeline_image_device_shared.hpp"
#include "pipeline_box_device_shared.hpp"
#include "pipeline_SDF_device_shared.hpp"
#include "pipeline_tone_mapper_device_shared.hpp"

// Third-party headers.
#include <vulkan/vulkan.hpp>
#include <vk_mem_alloc.h>

// Standard library.
#include <cstdint>
#include <span>
#include <utility>
#include <vector>

// Forward declaration; the full definition is only needed by loadShader(URL const &).
namespace tt {
class URL;
} // namespace tt
21
22namespace tt {
23
24class gfx_device_vulkan final : public gfx_device {
25public:
26
27 vk::PhysicalDeviceType deviceType = vk::PhysicalDeviceType::eOther;
28 vk::PhysicalDeviceProperties physicalProperties;
29
31
35 [[nodiscard]] gfx_queue_vulkan const &get_graphics_queue() const noexcept;
36
41 [[nodiscard]] gfx_queue_vulkan const &get_graphics_queue(gfx_surface const &surface) const noexcept;
42
47 [[nodiscard]] gfx_queue_vulkan const &get_present_queue(gfx_surface const &surface) const noexcept;
48
57 [[nodiscard]] vk::SurfaceFormatKHR get_surface_format(gfx_surface const &surface, int *score = nullptr) const noexcept;
58
67 [[nodiscard]] vk::PresentModeKHR get_present_mode(gfx_surface const &surface, int *score = nullptr) const noexcept;
68
79 vk::Buffer quadIndexBuffer;
80 VmaAllocation quadIndexBufferAllocation = {};
81
87
91
92 bool supportsLazyTransientImages = false;
93 vk::ImageUsageFlags transientImageUsageFlags = vk::ImageUsageFlags{};
94 VmaMemoryUsage lazyMemoryUsage = VMA_MEMORY_USAGE_GPU_ONLY;
95
96
97 gfx_device_vulkan(gfx_system &system, vk::PhysicalDevice physicalDevice);
99
100 gfx_device_vulkan(const gfx_device_vulkan &) = delete;
101 gfx_device_vulkan &operator=(const gfx_device_vulkan &) = delete;
103 gfx_device_vulkan &operator=(gfx_device_vulkan &&) = delete;
104
105 int score(vk::SurfaceKHR surface) const;
106
107 int score(gfx_surface const &surface) const override;
108
117
119 createBuffer(const vk::BufferCreateInfo &bufferCreateInfo, const VmaAllocationCreateInfo &allocationCreateInfo) const;
120
121 void destroyBuffer(const vk::Buffer &buffer, const VmaAllocation &allocation) const;
122
124 createImage(const vk::ImageCreateInfo &imageCreateInfo, const VmaAllocationCreateInfo &allocationCreateInfo) const;
125 void destroyImage(const vk::Image &image, const VmaAllocation &allocation) const;
126
127 vk::CommandBuffer beginSingleTimeCommands() const;
128 void endSingleTimeCommands(vk::CommandBuffer commandBuffer) const;
129
130 static void transition_layout(
131 vk::CommandBuffer command_buffer,
132 vk::Image image,
133 vk::Format format,
134 vk::ImageLayout src_layout,
135 vk::ImageLayout dst_layout);
136 void transition_layout(vk::Image image, vk::Format format, vk::ImageLayout src_layout, vk::ImageLayout dst_layout) const;
137
138 void copyImage(
139 vk::Image srcImage,
140 vk::ImageLayout srcLayout,
141 vk::Image dstImage,
142 vk::ImageLayout dstLayout,
143 vk::ArrayProxy<vk::ImageCopy const> regions) const;
144 void clearColorImage(
145 vk::Image image,
146 vk::ImageLayout layout,
147 vk::ClearColorValue const &color,
148 vk::ArrayProxy<const vk::ImageSubresourceRange> ranges) const;
149
150 template<typename T>
151 std::span<T> mapMemory(const VmaAllocation &allocation) const
152 {
153 tt_axiom(gfx_system_mutex.recurse_lock_count());
154
155 void *mapping;
156 ttlet result = static_cast<vk::Result>(vmaMapMemory(allocator, allocation, &mapping));
157
158 VmaAllocationInfo allocationInfo;
159 vmaGetAllocationInfo(allocator, allocation, &allocationInfo);
160
161 // Should we launder the pointer? The GPU has created the objects, not the C++ application.
162 T *mappingT = reinterpret_cast<T *>(mapping);
163 ttlet mappingSpan = std::span<T>(mappingT, allocationInfo.size / sizeof(T));
164
165 return vk::createResultValue(result, mappingSpan, "tt::gfx_device_vulkan::mapMemory");
166 }
167
168 void unmapMemory(const VmaAllocation &allocation) const;
169
170 void flushAllocation(const VmaAllocation &allocation, VkDeviceSize offset, VkDeviceSize size) const
171 {
172 tt_axiom(gfx_system_mutex.recurse_lock_count());
173
174 ttlet alignment = physicalProperties.limits.nonCoherentAtomSize;
175
176 ttlet alignedOffset = (offset / alignment) * alignment;
177 ttlet adjustedSize = size + (offset - alignedOffset);
178 ttlet alignedSize = ((adjustedSize + (alignment - 1)) / alignment) * alignment;
179
180 vmaFlushAllocation(allocator, allocation, alignedOffset, alignedSize);
181 }
182
183 vk::ShaderModule loadShader(uint32_t const *data, size_t size) const;
184
185 vk::ShaderModule loadShader(std::span<std::byte const> shaderObjectBytes) const;
186
187 vk::ShaderModule loadShader(URL const &shaderObjectLocation) const;
188
189 void waitIdle() const
190 {
191 tt_axiom(gfx_system_mutex.recurse_lock_count());
192 return intrinsic.waitIdle();
193 }
194
195 vk::Result waitForFences(vk::ArrayProxy<const vk::Fence> fences, vk::Bool32 waitAll, uint64_t timeout) const
196 {
197 tt_axiom(gfx_system_mutex.recurse_lock_count());
198 return intrinsic.waitForFences(fences, waitAll, timeout);
199 }
200
201 vk::Result acquireNextImageKHR(
202 vk::SwapchainKHR swapchain,
203 uint64_t timeout,
204 vk::Semaphore semaphore,
205 vk::Fence fence,
206 uint32_t *pImageIndex) const
207 {
208 tt_axiom(gfx_system_mutex.recurse_lock_count());
209 return intrinsic.acquireNextImageKHR(swapchain, timeout, semaphore, fence, pImageIndex);
210 }
211
212 void resetFences(vk::ArrayProxy<const vk::Fence> fences) const
213 {
214 tt_axiom(gfx_system_mutex.recurse_lock_count());
215 return intrinsic.resetFences(fences);
216 }
217
218 vk::Result createSwapchainKHR(
219 const vk::SwapchainCreateInfoKHR *pCreateInfo,
220 const vk::AllocationCallbacks *pAllocator,
221 vk::SwapchainKHR *pSwapchain) const
222 {
223 tt_axiom(gfx_system_mutex.recurse_lock_count());
224 return intrinsic.createSwapchainKHR(pCreateInfo, pAllocator, pSwapchain);
225 }
226
227 std::vector<vk::Image> getSwapchainImagesKHR(vk::SwapchainKHR swapchain) const
228 {
229 tt_axiom(gfx_system_mutex.recurse_lock_count());
230 return intrinsic.getSwapchainImagesKHR(swapchain);
231 }
232
233 vk::ImageView createImageView(const vk::ImageViewCreateInfo &createInfo) const
234 {
235 tt_axiom(gfx_system_mutex.recurse_lock_count());
236 return intrinsic.createImageView(createInfo);
237 }
238
239 vk::Framebuffer createFramebuffer(const vk::FramebufferCreateInfo &createInfo) const
240 {
241 tt_axiom(gfx_system_mutex.recurse_lock_count());
242 return intrinsic.createFramebuffer(createInfo);
243 }
244
245 vk::RenderPass createRenderPass(const vk::RenderPassCreateInfo &createInfo) const
246 {
247 tt_axiom(gfx_system_mutex.recurse_lock_count());
248 return intrinsic.createRenderPass(createInfo);
249 }
250
251 vk::Semaphore createSemaphore(const vk::SemaphoreCreateInfo &createInfo) const
252 {
253 tt_axiom(gfx_system_mutex.recurse_lock_count());
254 return intrinsic.createSemaphore(createInfo);
255 }
256
257 vk::Fence createFence(const vk::FenceCreateInfo &createInfo) const
258 {
259 tt_axiom(gfx_system_mutex.recurse_lock_count());
260 return intrinsic.createFence(createInfo);
261 }
262
263 vk::DescriptorSetLayout createDescriptorSetLayout(const vk::DescriptorSetLayoutCreateInfo &createInfo) const
264 {
265 tt_axiom(gfx_system_mutex.recurse_lock_count());
266 return intrinsic.createDescriptorSetLayout(createInfo);
267 }
268
269 vk::DescriptorPool createDescriptorPool(const vk::DescriptorPoolCreateInfo &createInfo) const
270 {
271 tt_axiom(gfx_system_mutex.recurse_lock_count());
272 return intrinsic.createDescriptorPool(createInfo);
273 }
274
275 vk::PipelineLayout createPipelineLayout(const vk::PipelineLayoutCreateInfo &createInfo) const
276 {
277 tt_axiom(gfx_system_mutex.recurse_lock_count());
278 return intrinsic.createPipelineLayout(createInfo);
279 }
280
281 vk::Pipeline createGraphicsPipeline(vk::PipelineCache pipelineCache, const vk::GraphicsPipelineCreateInfo &createInfo) const
282 {
283 tt_axiom(gfx_system_mutex.recurse_lock_count());
284 return intrinsic.createGraphicsPipeline(pipelineCache, createInfo).value;
285 }
286
287 vk::Sampler createSampler(const vk::SamplerCreateInfo &createInfo) const
288 {
289 tt_axiom(gfx_system_mutex.recurse_lock_count());
290 return intrinsic.createSampler(createInfo);
291 }
292
293 std::vector<vk::DescriptorSet> allocateDescriptorSets(const vk::DescriptorSetAllocateInfo &allocateInfo) const
294 {
295 tt_axiom(gfx_system_mutex.recurse_lock_count());
296 return intrinsic.allocateDescriptorSets(allocateInfo);
297 }
298
299 std::vector<vk::CommandBuffer> allocateCommandBuffers(const vk::CommandBufferAllocateInfo &allocateInfo) const
300 {
301 tt_axiom(gfx_system_mutex.recurse_lock_count());
302 return intrinsic.allocateCommandBuffers(allocateInfo);
303 }
304
305 void updateDescriptorSets(
306 vk::ArrayProxy<const vk::WriteDescriptorSet> descriptorWrites,
307 vk::ArrayProxy<const vk::CopyDescriptorSet> descriptorCopies) const
308 {
309 tt_axiom(gfx_system_mutex.recurse_lock_count());
310 return intrinsic.updateDescriptorSets(descriptorWrites, descriptorCopies);
311 }
312
313 void freeCommandBuffers(vk::CommandPool commandPool, vk::ArrayProxy<const vk::CommandBuffer> commandBuffers) const
314 {
315 tt_axiom(gfx_system_mutex.recurse_lock_count());
316 return intrinsic.freeCommandBuffers(commandPool, commandBuffers);
317 }
318
319 template<typename T>
320 void destroy(T x) const
321 {
322 tt_axiom(gfx_system_mutex.recurse_lock_count());
323 intrinsic.destroy(x);
324 }
325
326 vk::SurfaceCapabilitiesKHR getSurfaceCapabilitiesKHR(vk::SurfaceKHR surface) const
327 {
328 tt_axiom(gfx_system_mutex.recurse_lock_count());
329 return physicalIntrinsic.getSurfaceCapabilitiesKHR(surface);
330 }
331
332protected:
333 vk::PhysicalDevice physicalIntrinsic;
334 vk::Device intrinsic;
335 VmaAllocator allocator;
336
337private:
338 [[nodiscard]] std::vector<vk::DeviceQueueCreateInfo> make_device_queue_create_infos() const noexcept;
339 void initialize_queues(std::vector<vk::DeviceQueueCreateInfo> const &device_queue_create_infos) noexcept;
340 void initialize_device();
341 void initialize_quad_index_buffer();
342 void destroy_quad_index_buffer();
343};
344
345} // namespace tt
alignment
Vertical and horizontal alignment.
Definition alignment.hpp:47
STL namespace.
This is a RGBA floating point color.
Definition color.hpp:36
Definition gfx_device.hpp:22
Definition gfx_device_vulkan.hpp:24
gfx_queue_vulkan const & get_present_queue(gfx_surface const &surface) const noexcept
Get a present queue.
gfx_queue_vulkan const & get_graphics_queue() const noexcept
Get a graphics queue.
vk::Buffer quadIndexBuffer
Shared index buffer containing indices for drawing quads.
Definition gfx_device_vulkan.hpp:79
std::vector< const char * > requiredExtensions
Definition gfx_device_vulkan.hpp:90
vk::PresentModeKHR get_present_mode(gfx_surface const &surface, int *score=nullptr) const noexcept
Get the present mode.
vk::SurfaceFormatKHR get_surface_format(gfx_surface const &surface, int *score=nullptr) const noexcept
Get the surface format.
std::vector< std::pair< uint32_t, uint8_t > > find_best_queue_family_indices(vk::SurfaceKHR surface) const
int score(gfx_surface const &surface) const override
Definition gfx_queue_vulkan.hpp:12
Definition gfx_surface.hpp:16
Graphics system.
Definition gfx_system.hpp:21
int recurse_lock_count() const noexcept
This function should be used in tt_axiom() to check if the lock is held by current thread.
Definition unfair_recursive_mutex.hpp:60