HikoGUI
A low latency retained GUI
Loading...
Searching...
No Matches
gfx_device_vulkan.hpp
// Copyright Take Vos 2019-2020.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)

#pragma once

#include "gfx_device.hpp"
#include "gfx_system_globals.hpp"
#include "gfx_queue_vulkan.hpp"
#include "pipeline_image_device_shared.hpp"
#include "pipeline_box_device_shared.hpp"
#include "pipeline_SDF_device_shared.hpp"
#include "pipeline_tone_mapper_device_shared.hpp"
#include <vulkan/vulkan.hpp>
#include <vk_mem_alloc.h>
#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>
16
17namespace tt {
18class URL;
19}
20
21namespace tt {
22
23class gfx_device_vulkan final : public gfx_device {
24public:
25
26 vk::PhysicalDeviceType deviceType = vk::PhysicalDeviceType::eOther;
27 vk::PhysicalDeviceProperties physicalProperties;
28
30
34 [[nodiscard]] gfx_queue_vulkan const &get_graphics_queue() const noexcept;
35
40 [[nodiscard]] gfx_queue_vulkan const &get_graphics_queue(gfx_surface const &surface) const noexcept;
41
46 [[nodiscard]] gfx_queue_vulkan const &get_present_queue(gfx_surface const &surface) const noexcept;
47
56 [[nodiscard]] vk::SurfaceFormatKHR get_surface_format(gfx_surface const &surface, int *score = nullptr) const noexcept;
57
66 [[nodiscard]] vk::PresentModeKHR get_present_mode(gfx_surface const &surface, int *score = nullptr) const noexcept;
67
78 vk::Buffer quadIndexBuffer;
79 VmaAllocation quadIndexBufferAllocation = {};
80
85
89
90 bool supportsLazyTransientImages = false;
91 vk::ImageUsageFlags transientImageUsageFlags = vk::ImageUsageFlags{};
92 VmaMemoryUsage lazyMemoryUsage = VMA_MEMORY_USAGE_GPU_ONLY;
93
94
95 gfx_device_vulkan(gfx_system &system, vk::PhysicalDevice physicalDevice);
97
98 gfx_device_vulkan(const gfx_device_vulkan &) = delete;
99 gfx_device_vulkan &operator=(const gfx_device_vulkan &) = delete;
101 gfx_device_vulkan &operator=(gfx_device_vulkan &&) = delete;
102
103 int score(vk::SurfaceKHR surface) const;
104
105 int score(gfx_surface const &surface) const override;
106
115
117 createBuffer(const vk::BufferCreateInfo &bufferCreateInfo, const VmaAllocationCreateInfo &allocationCreateInfo) const;
118
119 void destroyBuffer(const vk::Buffer &buffer, const VmaAllocation &allocation) const;
120
122 createImage(const vk::ImageCreateInfo &imageCreateInfo, const VmaAllocationCreateInfo &allocationCreateInfo) const;
123 void destroyImage(const vk::Image &image, const VmaAllocation &allocation) const;
124
125 vk::CommandBuffer beginSingleTimeCommands() const;
126 void endSingleTimeCommands(vk::CommandBuffer commandBuffer) const;
127
128 static void transition_layout(
129 vk::CommandBuffer command_buffer,
130 vk::Image image,
131 vk::Format format,
132 vk::ImageLayout src_layout,
133 vk::ImageLayout dst_layout);
134 void transition_layout(vk::Image image, vk::Format format, vk::ImageLayout src_layout, vk::ImageLayout dst_layout) const;
135
136 void copyImage(
137 vk::Image srcImage,
138 vk::ImageLayout srcLayout,
139 vk::Image dstImage,
140 vk::ImageLayout dstLayout,
141 vk::ArrayProxy<vk::ImageCopy const> regions) const;
142 void clearColorImage(
143 vk::Image image,
144 vk::ImageLayout layout,
145 vk::ClearColorValue const &color,
146 vk::ArrayProxy<const vk::ImageSubresourceRange> ranges) const;
147
148 template<typename T>
149 std::span<T> mapMemory(const VmaAllocation &allocation) const
150 {
151 tt_axiom(gfx_system_mutex.recurse_lock_count());
152
153 void *mapping;
154 ttlet result = static_cast<vk::Result>(vmaMapMemory(allocator, allocation, &mapping));
155
156 VmaAllocationInfo allocationInfo;
157 vmaGetAllocationInfo(allocator, allocation, &allocationInfo);
158
159 // Should we launder the pointer? The GPU has created the objects, not the C++ application.
160 T *mappingT = reinterpret_cast<T *>(mapping);
161 ttlet mappingSpan = std::span<T>(mappingT, allocationInfo.size / sizeof(T));
162
163 return vk::createResultValue(result, mappingSpan, "tt::gfx_device_vulkan::mapMemory");
164 }
165
166 void unmapMemory(const VmaAllocation &allocation) const;
167
168 void flushAllocation(const VmaAllocation &allocation, VkDeviceSize offset, VkDeviceSize size) const
169 {
170 tt_axiom(gfx_system_mutex.recurse_lock_count());
171
172 ttlet alignment = physicalProperties.limits.nonCoherentAtomSize;
173
174 ttlet alignedOffset = (offset / alignment) * alignment;
175 ttlet adjustedSize = size + (offset - alignedOffset);
176 ttlet alignedSize = ((adjustedSize + (alignment - 1)) / alignment) * alignment;
177
178 vmaFlushAllocation(allocator, allocation, alignedOffset, alignedSize);
179 }
180
181 vk::ShaderModule loadShader(uint32_t const *data, size_t size) const;
182
183 vk::ShaderModule loadShader(std::span<std::byte const> shaderObjectBytes) const;
184
185 vk::ShaderModule loadShader(URL const &shaderObjectLocation) const;
186
187 void waitIdle() const
188 {
189 tt_axiom(gfx_system_mutex.recurse_lock_count());
190 return intrinsic.waitIdle();
191 }
192
193 vk::Result waitForFences(vk::ArrayProxy<const vk::Fence> fences, vk::Bool32 waitAll, uint64_t timeout) const
194 {
195 tt_axiom(gfx_system_mutex.recurse_lock_count());
196 return intrinsic.waitForFences(fences, waitAll, timeout);
197 }
198
199 vk::Result acquireNextImageKHR(
200 vk::SwapchainKHR swapchain,
201 uint64_t timeout,
202 vk::Semaphore semaphore,
203 vk::Fence fence,
204 uint32_t *pImageIndex) const
205 {
206 tt_axiom(gfx_system_mutex.recurse_lock_count());
207 return intrinsic.acquireNextImageKHR(swapchain, timeout, semaphore, fence, pImageIndex);
208 }
209
210 void resetFences(vk::ArrayProxy<const vk::Fence> fences) const
211 {
212 tt_axiom(gfx_system_mutex.recurse_lock_count());
213 return intrinsic.resetFences(fences);
214 }
215
216 vk::Result createSwapchainKHR(
217 const vk::SwapchainCreateInfoKHR *pCreateInfo,
218 const vk::AllocationCallbacks *pAllocator,
219 vk::SwapchainKHR *pSwapchain) const
220 {
221 tt_axiom(gfx_system_mutex.recurse_lock_count());
222 return intrinsic.createSwapchainKHR(pCreateInfo, pAllocator, pSwapchain);
223 }
224
225 std::vector<vk::Image> getSwapchainImagesKHR(vk::SwapchainKHR swapchain) const
226 {
227 tt_axiom(gfx_system_mutex.recurse_lock_count());
228 return intrinsic.getSwapchainImagesKHR(swapchain);
229 }
230
231 vk::ImageView createImageView(const vk::ImageViewCreateInfo &createInfo) const
232 {
233 tt_axiom(gfx_system_mutex.recurse_lock_count());
234 return intrinsic.createImageView(createInfo);
235 }
236
237 vk::Framebuffer createFramebuffer(const vk::FramebufferCreateInfo &createInfo) const
238 {
239 tt_axiom(gfx_system_mutex.recurse_lock_count());
240 return intrinsic.createFramebuffer(createInfo);
241 }
242
243 vk::RenderPass createRenderPass(const vk::RenderPassCreateInfo &createInfo) const
244 {
245 tt_axiom(gfx_system_mutex.recurse_lock_count());
246 return intrinsic.createRenderPass(createInfo);
247 }
248
249 vk::Semaphore createSemaphore(const vk::SemaphoreCreateInfo &createInfo) const
250 {
251 tt_axiom(gfx_system_mutex.recurse_lock_count());
252 return intrinsic.createSemaphore(createInfo);
253 }
254
255 vk::Fence createFence(const vk::FenceCreateInfo &createInfo) const
256 {
257 tt_axiom(gfx_system_mutex.recurse_lock_count());
258 return intrinsic.createFence(createInfo);
259 }
260
261 vk::DescriptorSetLayout createDescriptorSetLayout(const vk::DescriptorSetLayoutCreateInfo &createInfo) const
262 {
263 tt_axiom(gfx_system_mutex.recurse_lock_count());
264 return intrinsic.createDescriptorSetLayout(createInfo);
265 }
266
267 vk::DescriptorPool createDescriptorPool(const vk::DescriptorPoolCreateInfo &createInfo) const
268 {
269 tt_axiom(gfx_system_mutex.recurse_lock_count());
270 return intrinsic.createDescriptorPool(createInfo);
271 }
272
273 vk::PipelineLayout createPipelineLayout(const vk::PipelineLayoutCreateInfo &createInfo) const
274 {
275 tt_axiom(gfx_system_mutex.recurse_lock_count());
276 return intrinsic.createPipelineLayout(createInfo);
277 }
278
279 vk::Pipeline createGraphicsPipeline(vk::PipelineCache pipelineCache, const vk::GraphicsPipelineCreateInfo &createInfo) const
280 {
281 tt_axiom(gfx_system_mutex.recurse_lock_count());
282 return intrinsic.createGraphicsPipeline(pipelineCache, createInfo).value;
283 }
284
285 vk::Sampler createSampler(const vk::SamplerCreateInfo &createInfo) const
286 {
287 tt_axiom(gfx_system_mutex.recurse_lock_count());
288 return intrinsic.createSampler(createInfo);
289 }
290
291 std::vector<vk::DescriptorSet> allocateDescriptorSets(const vk::DescriptorSetAllocateInfo &allocateInfo) const
292 {
293 tt_axiom(gfx_system_mutex.recurse_lock_count());
294 return intrinsic.allocateDescriptorSets(allocateInfo);
295 }
296
297 std::vector<vk::CommandBuffer> allocateCommandBuffers(const vk::CommandBufferAllocateInfo &allocateInfo) const
298 {
299 tt_axiom(gfx_system_mutex.recurse_lock_count());
300 return intrinsic.allocateCommandBuffers(allocateInfo);
301 }
302
303 void updateDescriptorSets(
304 vk::ArrayProxy<const vk::WriteDescriptorSet> descriptorWrites,
305 vk::ArrayProxy<const vk::CopyDescriptorSet> descriptorCopies) const
306 {
307 tt_axiom(gfx_system_mutex.recurse_lock_count());
308 return intrinsic.updateDescriptorSets(descriptorWrites, descriptorCopies);
309 }
310
311 void freeCommandBuffers(vk::CommandPool commandPool, vk::ArrayProxy<const vk::CommandBuffer> commandBuffers) const
312 {
313 tt_axiom(gfx_system_mutex.recurse_lock_count());
314 return intrinsic.freeCommandBuffers(commandPool, commandBuffers);
315 }
316
317 template<typename T>
318 void destroy(T x) const
319 {
320 tt_axiom(gfx_system_mutex.recurse_lock_count());
321 intrinsic.destroy(x);
322 }
323
324 vk::SurfaceCapabilitiesKHR getSurfaceCapabilitiesKHR(vk::SurfaceKHR surface) const
325 {
326 tt_axiom(gfx_system_mutex.recurse_lock_count());
327 return physicalIntrinsic.getSurfaceCapabilitiesKHR(surface);
328 }
329
330protected:
331 vk::PhysicalDevice physicalIntrinsic;
332 vk::Device intrinsic;
333 VmaAllocator allocator;
334
335private:
336 [[nodiscard]] std::vector<vk::DeviceQueueCreateInfo> make_device_queue_create_infos() const noexcept;
337 void initialize_queues(std::vector<vk::DeviceQueueCreateInfo> const &device_queue_create_infos) noexcept;
338 void initialize_device();
339 void initialize_quad_index_buffer();
340 void destroy_quad_index_buffer();
341};
342
343} // namespace tt
alignment
Vertical and horizontal alignment.
Definition alignment.hpp:47
STL namespace.
This is a RGBA floating point color.
Definition color.hpp:36
Definition gfx_device.hpp:22
Definition gfx_device_vulkan.hpp:23
gfx_queue_vulkan const & get_present_queue(gfx_surface const &surface) const noexcept
Get a present queue.
gfx_queue_vulkan const & get_graphics_queue() const noexcept
Get a graphics queue.
vk::Buffer quadIndexBuffer
Shared index buffer containing indices for drawing quads.
Definition gfx_device_vulkan.hpp:78
std::vector< const char * > requiredExtensions
Definition gfx_device_vulkan.hpp:88
vk::PresentModeKHR get_present_mode(gfx_surface const &surface, int *score=nullptr) const noexcept
Get the present mode.
vk::SurfaceFormatKHR get_surface_format(gfx_surface const &surface, int *score=nullptr) const noexcept
Get the surface format.
std::vector< std::pair< uint32_t, uint8_t > > find_best_queue_family_indices(vk::SurfaceKHR surface) const
int score(gfx_surface const &surface) const override
Definition gfx_queue_vulkan.hpp:12
Definition gfx_surface.hpp:16
Graphics system.
Definition gfx_system.hpp:21
int recurse_lock_count() const noexcept
This function should be used in tt_axiom() to check if the lock is held by current thread.
Definition unfair_recursive_mutex.hpp:60