uint32_t vendorID = 0;
uint32_t deviceID = 0;
vk::PhysicalDevice physicalIntrinsic;
VmaAllocator allocator;
vk::PhysicalDeviceType deviceType = vk::PhysicalDeviceType::eOther;
vk::PhysicalDeviceProperties physicalProperties;
VmaAllocation quadIndexBufferAllocation = {};
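// Transient attachments backed by lazily allocated memory let tile-based
// GPUs keep intermediate render targets on-chip. The members below record
// whether the device supports that, and which usage/memory flags to request.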
bool supportsLazyTransientImages = false;
vk::ImageUsageFlags transientImageUsageFlags = vk::ImageUsageFlags{};
VmaMemoryUsage lazyMemoryUsage = VMA_MEMORY_USAGE_GPU_ONLY;
hilet lock = std::scoped_lock(gfx_system_mutex);

tone_mapper_pipeline->destroy(this);
tone_mapper_pipeline = nullptr;
alpha_pipeline->destroy(this);
alpha_pipeline = nullptr;
SDF_pipeline->destroy(this);
SDF_pipeline = nullptr;
image_pipeline->destroy(this);
image_pipeline = nullptr;
box_pipeline->destroy(this);
box_pipeline = nullptr;

destroy_quad_index_buffer();

vmaDestroyAllocator(allocator);

for (hilet& queue : _queues) {
intrinsic.destroy(queue.command_pool);
hi_log_fatal("Could not properly destruct gfx_device. '{}'", e.what());
gfx_device(const gfx_device&) = delete;
gfx_device& operator=(const gfx_device&) = delete;
gfx_device(gfx_device&&) = delete;
gfx_device& operator=(gfx_device&&) = delete;
gfx_device(vk::PhysicalDevice physicalDevice);
[[nodiscard]] std::string string() const

hilet lock = std::scoped_lock(gfx_system_mutex);
return std::format("{0:04x}:{1:04x} {2} {3}", vendorID, deviceID, deviceName, deviceUUID.uuid_string());
for (auto& queue : _queues) {
if (queue.flags & vk::QueueFlagBits::eGraphics) {
return queue;
for (auto& queue : _queues) {
if (queue.flags & vk::QueueFlagBits::eGraphics) {
if (physicalIntrinsic.getSurfaceSupportKHR(queue.family_queue_index, surface)) {
return queue;

if (not graphics_queue) {
graphics_queue = &queue;

hi_assert_not_null(graphics_queue);
return *graphics_queue;
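// Preference order: a graphics queue that can also present to `surface`
// avoids a cross-queue ownership transfer of swapchain images; the first
// graphics-capable queue is kept as a fallback.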
for (auto& queue : _queues) {
if (physicalIntrinsic.getSurfaceSupportKHR(queue.family_queue_index, surface)) {
if (queue.flags & vk::QueueFlagBits::eGraphics) {
return queue;

if (not present_queue) {
present_queue = &queue;

hi_assert_not_null(present_queue);
return *present_queue;
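// Mirror image of get_graphics_queue(): prefer a present-capable queue that
// is also a graphics queue, falling back to any present-capable queue.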
[[nodiscard]] vk::SurfaceFormatKHR get_surface_format(vk::SurfaceKHR surface, int *score = nullptr) const noexcept

auto best_surface_format = vk::SurfaceFormatKHR{};
auto best_surface_format_score = 0;
for (auto surface_format : physicalIntrinsic.getSurfaceFormatsKHR(surface)) {
auto surface_format_score = 0;

switch (surface_format.colorSpace) {
case vk::ColorSpaceKHR::eSrgbNonlinear:
surface_format_score += 1;
break;
case vk::ColorSpaceKHR::eExtendedSrgbNonlinearEXT:
surface_format_score += 10;
break;
switch (surface_format.format) {
case vk::Format::eR16G16B16A16Sfloat:
if (os_settings::uniform_HDR()) {
surface_format_score += 12;
} else {
surface_format_score -= 100;
}
break;
case vk::Format::eR16G16B16Sfloat:
if (os_settings::uniform_HDR()) {
surface_format_score += 11;
} else {
surface_format_score -= 100;
}
break;
case vk::Format::eA2B10G10R10UnormPack32:
surface_format_score -= 100;
break;
case vk::Format::eR8G8B8A8Srgb:
surface_format_score += 4;
break;
case vk::Format::eB8G8R8A8Srgb:
surface_format_score += 4;
break;
case vk::Format::eR8G8B8Srgb:
surface_format_score += 3;
break;
case vk::Format::eB8G8R8Srgb:
surface_format_score += 3;
break;
case vk::Format::eB8G8R8A8Unorm:
surface_format_score += 2;
break;
case vk::Format::eR8G8B8A8Unorm:
surface_format_score += 2;
break;
case vk::Format::eB8G8R8Unorm:
surface_format_score += 1;
break;
case vk::Format::eR8G8B8Unorm:
surface_format_score += 1;
break;
249 " - color-space={}, format={}, score={}",
250 vk::to_string(surface_format.colorSpace),
251 vk::to_string(surface_format.format),
252 surface_format_score);
if (surface_format_score > best_surface_format_score) {
best_surface_format_score = surface_format_score;
best_surface_format = surface_format;

if (score != nullptr) {
*score = best_surface_format_score;

return best_surface_format;
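// Usage sketch (hypothetical caller): pass a score pointer when comparing
// devices; omit it when only the format itself is needed.
//     int format_score = 0;
//     hilet format = device.get_surface_format(surface, &format_score);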
[[nodiscard]] vk::PresentModeKHR get_present_mode(vk::SurfaceKHR surface, int *score = nullptr) const noexcept

auto best_present_mode = vk::PresentModeKHR{};
auto best_present_mode_score = 0;
for (hilet& present_mode : physicalIntrinsic.getSurfacePresentModesKHR(surface)) {
int present_mode_score = 0;

switch (present_mode) {
case vk::PresentModeKHR::eImmediate:
present_mode_score += 1;
break;
case vk::PresentModeKHR::eFifoRelaxed:
present_mode_score += 2;
break;
case vk::PresentModeKHR::eFifo:
present_mode_score += 3;
break;
case vk::PresentModeKHR::eMailbox:
present_mode_score += 1;
break;
hi_log_info(" - present-mode={} score={}", vk::to_string(present_mode), present_mode_score);
if (present_mode_score > best_present_mode_score) {
best_present_mode_score = present_mode_score;
best_present_mode = present_mode;

if (score != nullptr) {
*score = best_present_mode_score;

return best_present_mode;
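// Note the ranking: eFifo (classic vsync) scores highest, then eFifoRelaxed;
// eImmediate and eMailbox are accepted only when nothing better exists.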
int score(vk::SurfaceKHR surface) const;
std::pair<vk::Buffer, VmaAllocation> createBuffer(const vk::BufferCreateInfo& bufferCreateInfo, const VmaAllocationCreateInfo& allocationCreateInfo) const

vk::Buffer buffer;
VmaAllocation allocation;

hilet bufferCreateInfo_ = static_cast<VkBufferCreateInfo>(bufferCreateInfo);
hilet result =
    vk::Result{vmaCreateBuffer(allocator, &bufferCreateInfo_, &allocationCreateInfo, &buffer, &allocation, nullptr)};

if (result != vk::Result::eSuccess) {
throw gui_error(std::format("vmaCreateBuffer() failed {}", to_string(result)));

return {buffer, allocation};
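// Usage sketch (hypothetical values): create a small host-visible staging
// buffer; the returned VmaAllocation must later be released via destroyBuffer().
//     VmaAllocationCreateInfo ai = {};
//     ai.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//     auto [buf, alloc] = createBuffer(
//         {vk::BufferCreateFlags(), 1024, vk::BufferUsageFlagBits::eTransferSrc, vk::SharingMode::eExclusive}, ai);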
void destroyBuffer(const vk::Buffer& buffer, const VmaAllocation& allocation) const

vmaDestroyBuffer(allocator, buffer, allocation);
std::pair<vk::Image, VmaAllocation> createImage(const vk::ImageCreateInfo& imageCreateInfo, const VmaAllocationCreateInfo& allocationCreateInfo) const

vk::Image image;
VmaAllocation allocation;

hilet imageCreateInfo_ = static_cast<VkImageCreateInfo>(imageCreateInfo);
hilet result =
    vk::Result{vmaCreateImage(allocator, &imageCreateInfo_, &allocationCreateInfo, &image, &allocation, nullptr)};

if (result != vk::Result::eSuccess) {
throw gui_error(std::format("vmaCreateImage() failed {}", to_string(result)));

return {image, allocation};
void destroyImage(const vk::Image& image, const VmaAllocation& allocation) const

vmaDestroyImage(allocator, image, allocation);
vk::CommandBuffer beginSingleTimeCommands() const

hilet& queue = get_graphics_queue();
hilet commandBuffers = intrinsic.allocateCommandBuffers({queue.command_pool, vk::CommandBufferLevel::ePrimary, 1});
hilet commandBuffer = commandBuffers.at(0);

commandBuffer.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
return commandBuffer;
void endSingleTimeCommands(vk::CommandBuffer commandBuffer) const

commandBuffer.end();

hilet commandBuffers = std::array{commandBuffer};

hilet& queue = get_graphics_queue();
queue.queue.submit(
    vk::SubmitInfo{
        0,
        nullptr,
        nullptr,
        narrow_cast<uint32_t>(commandBuffers.size()),
        commandBuffers.data(),
        0,
        nullptr},
    vk::Fence());
queue.queue.waitIdle();
intrinsic.freeCommandBuffers(queue.command_pool, commandBuffers);
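// begin/endSingleTimeCommands bracket a one-shot upload: record, submit on
// the graphics queue, then block with waitIdle() until the GPU has finished.
// Sketch of the intended pairing (hypothetical caller):
//     hilet cmd = beginSingleTimeCommands();
//     cmd.copyBuffer(src, dst, regions);
//     endSingleTimeCommands(cmd);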
static void transition_layout(
    vk::CommandBuffer command_buffer,
    vk::Image image,
    vk::Format format,
    vk::ImageLayout src_layout,
    vk::ImageLayout dst_layout)

hilet[srcAccessMask, srcStage] = access_and_stage_from_layout(src_layout);
hilet[dstAccessMask, dstStage] = access_and_stage_from_layout(dst_layout);

hilet barriers = std::array{vk::ImageMemoryBarrier{
    srcAccessMask,
    dstAccessMask,
    src_layout,
    dst_layout,
    VK_QUEUE_FAMILY_IGNORED,
    VK_QUEUE_FAMILY_IGNORED,
    image,
    {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}}};

command_buffer.pipelineBarrier(
    srcStage,
    dstStage,
    vk::DependencyFlags(),
    0, nullptr,
    0, nullptr,
    narrow_cast<uint32_t>(barriers.size()),
    barriers.data());
void transition_layout(vk::Image image, vk::Format format, vk::ImageLayout src_layout, vk::ImageLayout dst_layout) const

hilet command_buffer = beginSingleTimeCommands();

transition_layout(command_buffer, image, format, src_layout, dst_layout);

endSingleTimeCommands(command_buffer);
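// Typical use (hypothetical values): move a freshly created texture into a
// transfer-friendly layout before uploading pixel data:
//     transition_layout(image, vk::Format::eR8G8B8A8Srgb,
//         vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);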
void copyImage(
    vk::Image srcImage,
    vk::ImageLayout srcLayout,
    vk::Image dstImage,
    vk::ImageLayout dstLayout,
    vk::ArrayProxy<vk::ImageCopy const> regions) const

hilet commandBuffer = beginSingleTimeCommands();

commandBuffer.copyImage(srcImage, srcLayout, dstImage, dstLayout, regions);

endSingleTimeCommands(commandBuffer);
void clearColorImage(
    vk::Image image,
    vk::ImageLayout layout,
    vk::ClearColorValue const& color,
    vk::ArrayProxy<const vk::ImageSubresourceRange> ranges) const

hilet commandBuffer = beginSingleTimeCommands();

commandBuffer.clearColorImage(image, layout, color, ranges);

endSingleTimeCommands(commandBuffer);
template<typename T>
std::span<T> mapMemory(const VmaAllocation& allocation) const

void *mapping;
hilet result = vk::Result{vmaMapMemory(allocator, allocation, &mapping)};
if (result != vk::Result::eSuccess) {
throw gui_error(std::format("vmaMapMemory failed {}", to_string(result)));

VmaAllocationInfo allocationInfo;
vmaGetAllocationInfo(allocator, allocation, &allocationInfo);

T *mappingT = static_cast<T *>(mapping);
return std::span<T>{mappingT, allocationInfo.size / sizeof(T)};
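// The span covers the whole allocation; a partial trailing element is cut
// off by the integer division. Sketch of the map/flush/unmap cycle
// (hypothetical caller):
//     auto words = mapMemory<uint32_t>(allocation);
//     words[0] = 0xdeadbeef;
//     flushAllocation(allocation, 0, VK_WHOLE_SIZE);
//     unmapMemory(allocation);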
void unmapMemory(const VmaAllocation& allocation) const

vmaUnmapMemory(allocator, allocation);
void flushAllocation(const VmaAllocation& allocation, VkDeviceSize offset, VkDeviceSize size) const

hilet alignment = physicalProperties.limits.nonCoherentAtomSize;

hilet alignedOffset = (offset / alignment) * alignment;
hilet adjustedSize = size + (offset - alignedOffset);
hilet alignedSize = ((adjustedSize + (alignment - 1)) / alignment) * alignment;

vmaFlushAllocation(allocator, allocation, alignedOffset, alignedSize);
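// Worked example of the rounding, assuming nonCoherentAtomSize == 64:
// offset=100, size=30 -> alignedOffset = (100/64)*64 = 64,
// adjustedSize = 30 + 36 = 66, alignedSize = ((66+63)/64)*64 = 128,
// so the flushed range [64, 192) fully covers the dirty bytes [100, 130).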
vk::ShaderModule loadShader(uint32_t const *data, std::size_t size) const

hi_log_info("Loading shader");

return intrinsic.createShaderModule({vk::ShaderModuleCreateFlags(), size, data});
vk::ShaderModule loadShader(std::span<std::byte const> shaderObjectBytes) const

// SPIR-V code must be 32-bit aligned before it may be reinterpreted as
// uint32_t words; masking with 3 checks both low address bits.
hilet address = reinterpret_cast<uintptr_t>(shaderObjectBytes.data());
hi_assert((address & 3) == 0);

hilet shaderObjectBytes32 = reinterpret_cast<uint32_t const *>(shaderObjectBytes.data());
return loadShader(shaderObjectBytes32, shaderObjectBytes.size());
vk::ShaderModule loadShader(std::filesystem::path const& path) const

return loadShader(as_span<std::byte const>(file_view{path}));
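// The three overloads layer on each other: path -> memory-mapped bytes ->
// aligned uint32_t words -> vk::ShaderModule. Hypothetical call:
//     hilet module = loadShader(std::filesystem::path{"shaders/box.frag.spv"});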
void waitIdle() const

return intrinsic.waitIdle();
vk::Result waitForFences(vk::ArrayProxy<const vk::Fence> fences, vk::Bool32 waitAll, uint64_t timeout) const

return intrinsic.waitForFences(fences, waitAll, timeout);
vk::Result acquireNextImageKHR(
    vk::SwapchainKHR swapchain,
    uint64_t timeout,
    vk::Semaphore semaphore,
    vk::Fence fence,
    uint32_t *pImageIndex) const

return intrinsic.acquireNextImageKHR(swapchain, timeout, semaphore, fence, pImageIndex);
void resetFences(vk::ArrayProxy<const vk::Fence> fences) const

return intrinsic.resetFences(fences);
vk::Result createSwapchainKHR(
    const vk::SwapchainCreateInfoKHR *pCreateInfo,
    const vk::AllocationCallbacks *pAllocator,
    vk::SwapchainKHR *pSwapchain) const

return intrinsic.createSwapchainKHR(pCreateInfo, pAllocator, pSwapchain);
std::vector<vk::Image> getSwapchainImagesKHR(vk::SwapchainKHR swapchain) const

return intrinsic.getSwapchainImagesKHR(swapchain);
vk::ImageView createImageView(const vk::ImageViewCreateInfo& createInfo) const

return intrinsic.createImageView(createInfo);
vk::Framebuffer createFramebuffer(const vk::FramebufferCreateInfo& createInfo) const

return intrinsic.createFramebuffer(createInfo);
vk::RenderPass createRenderPass(const vk::RenderPassCreateInfo& createInfo) const

return intrinsic.createRenderPass(createInfo);
vk::Extent2D getRenderAreaGranularity(const vk::RenderPass& render_pass) const noexcept

auto r = vk::Extent2D{};
intrinsic.getRenderAreaGranularity(render_pass, &r);
return r;
vk::Semaphore createSemaphore(const vk::SemaphoreCreateInfo& createInfo = vk::SemaphoreCreateInfo{}) const

return intrinsic.createSemaphore(createInfo);
vk::Fence createFence(const vk::FenceCreateInfo& createInfo) const

return intrinsic.createFence(createInfo);
vk::DescriptorSetLayout createDescriptorSetLayout(const vk::DescriptorSetLayoutCreateInfo& createInfo) const

return intrinsic.createDescriptorSetLayout(createInfo);
vk::DescriptorPool createDescriptorPool(const vk::DescriptorPoolCreateInfo& createInfo) const

return intrinsic.createDescriptorPool(createInfo);
vk::PipelineLayout createPipelineLayout(const vk::PipelineLayoutCreateInfo& createInfo) const

return intrinsic.createPipelineLayout(createInfo);
vk::Pipeline createGraphicsPipeline(vk::PipelineCache pipelineCache, const vk::GraphicsPipelineCreateInfo& createInfo) const

return intrinsic.createGraphicsPipeline(pipelineCache, createInfo).value;
vk::Sampler createSampler(const vk::SamplerCreateInfo& createInfo) const

return intrinsic.createSampler(createInfo);
std::vector<vk::DescriptorSet> allocateDescriptorSets(const vk::DescriptorSetAllocateInfo& allocateInfo) const

return intrinsic.allocateDescriptorSets(allocateInfo);

std::vector<vk::CommandBuffer> allocateCommandBuffers(const vk::CommandBufferAllocateInfo& allocateInfo) const

return intrinsic.allocateCommandBuffers(allocateInfo);
void updateDescriptorSets(
    vk::ArrayProxy<const vk::WriteDescriptorSet> descriptorWrites,
    vk::ArrayProxy<const vk::CopyDescriptorSet> descriptorCopies) const

return intrinsic.updateDescriptorSets(descriptorWrites, descriptorCopies);
void freeCommandBuffers(vk::CommandPool commandPool, vk::ArrayProxy<const vk::CommandBuffer> commandBuffers) const

return intrinsic.freeCommandBuffers(commandPool, commandBuffers);
void setDebugUtilsObjectNameEXT(vk::DebugUtilsObjectNameInfoEXT const& name_info) const;

void setDebugUtilsObjectNameEXT(vk::Image image, char const *name) const

return setDebugUtilsObjectNameEXT(
    vk::DebugUtilsObjectNameInfoEXT{vk::ObjectType::eImage, std::bit_cast<uint64_t>(image), name});

void setDebugUtilsObjectNameEXT(vk::Buffer buffer, char const *name) const

return setDebugUtilsObjectNameEXT(
    vk::DebugUtilsObjectNameInfoEXT{vk::ObjectType::eBuffer, std::bit_cast<uint64_t>(buffer), name});

void setDebugUtilsObjectNameEXT(vk::Sampler sampler, char const *name) const

return setDebugUtilsObjectNameEXT(
    vk::DebugUtilsObjectNameInfoEXT{vk::ObjectType::eSampler, std::bit_cast<uint64_t>(sampler), name});

void setDebugUtilsObjectNameEXT(vk::ShaderModule shader_module, char const *name) const

return setDebugUtilsObjectNameEXT(
    vk::DebugUtilsObjectNameInfoEXT{vk::ObjectType::eShaderModule, std::bit_cast<uint64_t>(shader_module), name});

void cmdBeginDebugUtilsLabelEXT(vk::CommandBuffer buffer, vk::DebugUtilsLabelEXT const& create_info) const;

void cmdEndDebugUtilsLabelEXT(vk::CommandBuffer buffer) const;

void cmdBeginDebugUtilsLabelEXT(vk::CommandBuffer buffer, char const *name) const

return cmdBeginDebugUtilsLabelEXT(buffer, vk::DebugUtilsLabelEXT{name});
template<typename T>
void destroy(T x) const

intrinsic.destroy(x);
vk::SurfaceCapabilitiesKHR getSurfaceCapabilitiesKHR(vk::SurfaceKHR surface) const

return physicalIntrinsic.getSurfaceCapabilitiesKHR(surface);
void log_memory_usage() const noexcept

hi_log_info("Memory usage for gfx device {}:", string());

char *stat_string;
vmaBuildStatsString(allocator, &stat_string, VK_TRUE);
hi_log_info(" * {}", stat_string);
vmaFreeStatsString(allocator, stat_string);
for (auto availableExtensionProperties : physicalDevice.enumerateDeviceExtensionProperties()) {
availableExtensions.insert(std::string(availableExtensionProperties.extensionName.data()));

for (auto requiredExtension : requiredExtensions) {
if (availableExtensions.count(requiredExtension) == 0) {
static bool meetsRequiredLimits(const vk::PhysicalDevice& physicalDevice, const vk::PhysicalDeviceLimits& requiredLimits)
static bool hasRequiredFeatures(const vk::PhysicalDevice& physicalDevice, const vk::PhysicalDeviceFeatures& requiredFeatures)

hilet availableFeatures = physicalDevice.getFeatures();

// Every feature follows the same rule: if it is required it must also be
// available, otherwise it is ignored.
hilet satisfies = [](vk::Bool32 required, vk::Bool32 available) {
    return required == VK_TRUE ? available == VK_TRUE : true;
};

auto meetsRequirements = true;
meetsRequirements &= satisfies(requiredFeatures.robustBufferAccess, availableFeatures.robustBufferAccess);
meetsRequirements &= satisfies(requiredFeatures.fullDrawIndexUint32, availableFeatures.fullDrawIndexUint32);
meetsRequirements &= satisfies(requiredFeatures.imageCubeArray, availableFeatures.imageCubeArray);
meetsRequirements &= satisfies(requiredFeatures.independentBlend, availableFeatures.independentBlend);
meetsRequirements &= satisfies(requiredFeatures.geometryShader, availableFeatures.geometryShader);
meetsRequirements &= satisfies(requiredFeatures.tessellationShader, availableFeatures.tessellationShader);
meetsRequirements &= satisfies(requiredFeatures.sampleRateShading, availableFeatures.sampleRateShading);
meetsRequirements &= satisfies(requiredFeatures.dualSrcBlend, availableFeatures.dualSrcBlend);
meetsRequirements &= satisfies(requiredFeatures.logicOp, availableFeatures.logicOp);
meetsRequirements &= satisfies(requiredFeatures.multiDrawIndirect, availableFeatures.multiDrawIndirect);
meetsRequirements &= satisfies(requiredFeatures.drawIndirectFirstInstance, availableFeatures.drawIndirectFirstInstance);
meetsRequirements &= satisfies(requiredFeatures.depthClamp, availableFeatures.depthClamp);
meetsRequirements &= satisfies(requiredFeatures.depthBiasClamp, availableFeatures.depthBiasClamp);
meetsRequirements &= satisfies(requiredFeatures.fillModeNonSolid, availableFeatures.fillModeNonSolid);
meetsRequirements &= satisfies(requiredFeatures.depthBounds, availableFeatures.depthBounds);
meetsRequirements &= satisfies(requiredFeatures.wideLines, availableFeatures.wideLines);
meetsRequirements &= satisfies(requiredFeatures.largePoints, availableFeatures.largePoints);
meetsRequirements &= satisfies(requiredFeatures.alphaToOne, availableFeatures.alphaToOne);
meetsRequirements &= satisfies(requiredFeatures.multiViewport, availableFeatures.multiViewport);
meetsRequirements &= satisfies(requiredFeatures.samplerAnisotropy, availableFeatures.samplerAnisotropy);
meetsRequirements &= satisfies(requiredFeatures.textureCompressionETC2, availableFeatures.textureCompressionETC2);
meetsRequirements &= satisfies(requiredFeatures.textureCompressionASTC_LDR, availableFeatures.textureCompressionASTC_LDR);
meetsRequirements &= satisfies(requiredFeatures.textureCompressionBC, availableFeatures.textureCompressionBC);
meetsRequirements &= satisfies(requiredFeatures.occlusionQueryPrecise, availableFeatures.occlusionQueryPrecise);
meetsRequirements &= satisfies(requiredFeatures.pipelineStatisticsQuery, availableFeatures.pipelineStatisticsQuery);
meetsRequirements &= satisfies(requiredFeatures.vertexPipelineStoresAndAtomics, availableFeatures.vertexPipelineStoresAndAtomics);
meetsRequirements &= satisfies(requiredFeatures.fragmentStoresAndAtomics, availableFeatures.fragmentStoresAndAtomics);
meetsRequirements &= satisfies(requiredFeatures.shaderTessellationAndGeometryPointSize, availableFeatures.shaderTessellationAndGeometryPointSize);
meetsRequirements &= satisfies(requiredFeatures.shaderImageGatherExtended, availableFeatures.shaderImageGatherExtended);
meetsRequirements &= satisfies(requiredFeatures.shaderStorageImageExtendedFormats, availableFeatures.shaderStorageImageExtendedFormats);
meetsRequirements &= satisfies(requiredFeatures.shaderStorageImageMultisample, availableFeatures.shaderStorageImageMultisample);
meetsRequirements &= satisfies(requiredFeatures.shaderStorageImageReadWithoutFormat, availableFeatures.shaderStorageImageReadWithoutFormat);
meetsRequirements &= satisfies(requiredFeatures.shaderStorageImageWriteWithoutFormat, availableFeatures.shaderStorageImageWriteWithoutFormat);
meetsRequirements &= satisfies(requiredFeatures.shaderUniformBufferArrayDynamicIndexing, availableFeatures.shaderUniformBufferArrayDynamicIndexing);
meetsRequirements &= satisfies(requiredFeatures.shaderSampledImageArrayDynamicIndexing, availableFeatures.shaderSampledImageArrayDynamicIndexing);
meetsRequirements &= satisfies(requiredFeatures.shaderStorageBufferArrayDynamicIndexing, availableFeatures.shaderStorageBufferArrayDynamicIndexing);
meetsRequirements &= satisfies(requiredFeatures.shaderStorageImageArrayDynamicIndexing, availableFeatures.shaderStorageImageArrayDynamicIndexing);
meetsRequirements &= satisfies(requiredFeatures.shaderClipDistance, availableFeatures.shaderClipDistance);
meetsRequirements &= satisfies(requiredFeatures.shaderCullDistance, availableFeatures.shaderCullDistance);
meetsRequirements &= satisfies(requiredFeatures.shaderFloat64, availableFeatures.shaderFloat64);
meetsRequirements &= satisfies(requiredFeatures.shaderInt64, availableFeatures.shaderInt64);
meetsRequirements &= satisfies(requiredFeatures.shaderInt16, availableFeatures.shaderInt16);
meetsRequirements &= satisfies(requiredFeatures.shaderResourceResidency, availableFeatures.shaderResourceResidency);
meetsRequirements &= satisfies(requiredFeatures.shaderResourceMinLod, availableFeatures.shaderResourceMinLod);
meetsRequirements &= satisfies(requiredFeatures.sparseBinding, availableFeatures.sparseBinding);
meetsRequirements &= satisfies(requiredFeatures.sparseResidencyBuffer, availableFeatures.sparseResidencyBuffer);
meetsRequirements &= satisfies(requiredFeatures.sparseResidencyImage2D, availableFeatures.sparseResidencyImage2D);
meetsRequirements &= satisfies(requiredFeatures.sparseResidencyImage3D, availableFeatures.sparseResidencyImage3D);
meetsRequirements &= satisfies(requiredFeatures.sparseResidency2Samples, availableFeatures.sparseResidency2Samples);
meetsRequirements &= satisfies(requiredFeatures.sparseResidency4Samples, availableFeatures.sparseResidency4Samples);
meetsRequirements &= satisfies(requiredFeatures.sparseResidency8Samples, availableFeatures.sparseResidency8Samples);
meetsRequirements &= satisfies(requiredFeatures.sparseResidency16Samples, availableFeatures.sparseResidency16Samples);
meetsRequirements &= satisfies(requiredFeatures.sparseResidencyAliased, availableFeatures.sparseResidencyAliased);
meetsRequirements &= satisfies(requiredFeatures.variableMultisampleRate, availableFeatures.variableMultisampleRate);
meetsRequirements &= satisfies(requiredFeatures.inheritedQueries, availableFeatures.inheritedQueries);

return meetsRequirements;
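// Sketch of a typical caller (hypothetical): zero-initialize the struct and
// set only the features the renderer actually needs, so everything else is
// ignored by the check above:
//     vk::PhysicalDeviceFeatures required{};
//     required.dualSrcBlend = VK_TRUE;
//     if (not hasRequiredFeatures(physicalDevice, required)) { /* skip this device */ }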
hilet default_queue_priority = std::array{1.0f};
uint32_t queue_family_index = 0;

for (auto queue_family_properties : physicalIntrinsic.getQueueFamilyProperties()) {
hilet num_queues = 1;
hi_assert(size(default_queue_priority) >= num_queues);
r.emplace_back(vk::DeviceQueueCreateFlags(), queue_family_index++, num_queues, default_queue_priority.data());
case vk::ImageLayout::eUndefined:
return {vk::AccessFlags(), vk::PipelineStageFlagBits::eTopOfPipe};

case vk::ImageLayout::eTransferDstOptimal:
return {vk::AccessFlagBits::eTransferWrite, vk::PipelineStageFlagBits::eTransfer};

case vk::ImageLayout::eShaderReadOnlyOptimal:
return {vk::AccessFlagBits::eShaderRead, vk::PipelineStageFlagBits::eFragmentShader};

case vk::ImageLayout::eGeneral:
return {vk::AccessFlagBits::eHostWrite, vk::PipelineStageFlagBits::eHost};

case vk::ImageLayout::eTransferSrcOptimal:
return {vk::AccessFlagBits::eTransferRead, vk::PipelineStageFlagBits::eTransfer};

case vk::ImageLayout::ePresentSrcKHR:
return {
    vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
    vk::PipelineStageFlagBits::eColorAttachmentOutput};
hilet queue_family_properties = physicalIntrinsic.getQueueFamilyProperties();

for (hilet& device_queue_create_info : device_queue_create_infos) {
hilet queue_family_index = device_queue_create_info.queueFamilyIndex;
hilet& queue_family_property = queue_family_properties[queue_family_index];
hilet queue_flags = queue_family_property.queueFlags;

for (uint32_t queue_index = 0; queue_index != device_queue_create_info.queueCount; ++queue_index) {
auto queue = intrinsic.getQueue(queue_family_index, queue_index);
auto command_pool = intrinsic.createCommandPool(
    {vk::CommandPoolCreateFlagBits::eTransient | vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
     queue_family_index});
void initialize_device();
void initialize_quad_index_buffer()

using vertex_index_type = uint16_t;
constexpr ssize_t maximum_number_of_vertices = 1 << (sizeof(vertex_index_type) * CHAR_BIT);
constexpr ssize_t maximum_number_of_quads = maximum_number_of_vertices / 4;
constexpr ssize_t maximum_number_of_triangles = maximum_number_of_quads * 2;
constexpr ssize_t maximum_number_of_indices = maximum_number_of_triangles * 3;
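// With uint16_t indices this works out to: 2^16 = 65536 addressable
// vertices, 65536 / 4 = 16384 quads, 16384 * 2 = 32768 triangles,
// 32768 * 3 = 98304 indices, i.e. a 196608-byte index buffer.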
vk::BufferCreateInfo const bufferCreateInfo = {
    vk::BufferCreateFlags(),
    sizeof(vertex_index_type) * maximum_number_of_indices,
    vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eTransferDst,
    vk::SharingMode::eExclusive};
VmaAllocationCreateInfo allocationCreateInfo = {};
allocationCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocationCreateInfo.pUserData = const_cast<char *>("vertex index buffer");
allocationCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
std::tie(quadIndexBuffer, quadIndexBufferAllocation) = createBuffer(bufferCreateInfo, allocationCreateInfo);
setDebugUtilsObjectNameEXT(quadIndexBuffer, "vertex index buffer");
vk::BufferCreateInfo const bufferCreateInfo = {
    vk::BufferCreateFlags(),
    sizeof(vertex_index_type) * maximum_number_of_indices,
    vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eTransferSrc,
    vk::SharingMode::eExclusive};
VmaAllocationCreateInfo allocationCreateInfo = {};
allocationCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
allocationCreateInfo.pUserData = const_cast<char *>("staging vertex index buffer");
allocationCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
hilet[stagingvertexIndexBuffer, stagingvertexIndexBufferAllocation] =
    createBuffer(bufferCreateInfo, allocationCreateInfo);
setDebugUtilsObjectNameEXT(stagingvertexIndexBuffer, "staging vertex index buffer");
hilet stagingvertexIndexBufferData = mapMemory<vertex_index_type>(stagingvertexIndexBufferAllocation);
for (std::size_t i = 0; i < maximum_number_of_indices; i++) {
hilet vertexInRectangle = i % 6;
hilet rectangleNr = i / 6;
hilet rectangleBase = rectangleNr * 4;
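// Each quad n expands to the two-triangle index pattern
// {4n+0, 4n+1, 4n+2, 4n+2, 4n+1, 4n+3}, matching the switch below.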
switch (vertexInRectangle) {
case 0:
stagingvertexIndexBufferData[i] = narrow_cast<vertex_index_type>(rectangleBase + 0);
break;
case 1:
stagingvertexIndexBufferData[i] = narrow_cast<vertex_index_type>(rectangleBase + 1);
break;
case 2:
stagingvertexIndexBufferData[i] = narrow_cast<vertex_index_type>(rectangleBase + 2);
break;
case 3:
stagingvertexIndexBufferData[i] = narrow_cast<vertex_index_type>(rectangleBase + 2);
break;
case 4:
stagingvertexIndexBufferData[i] = narrow_cast<vertex_index_type>(rectangleBase + 1);
break;
case 5:
stagingvertexIndexBufferData[i] = narrow_cast<vertex_index_type>(rectangleBase + 3);
break;
flushAllocation(stagingvertexIndexBufferAllocation, 0, VK_WHOLE_SIZE);
unmapMemory(stagingvertexIndexBufferAllocation);
auto& queue = get_graphics_queue();
auto commands = allocateCommandBuffers({queue.command_pool, vk::CommandBufferLevel::ePrimary, 1}).at(0);

commands.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
cmdBeginDebugUtilsLabelEXT(commands, "copy vertex index buffer");
commands.copyBuffer(
    stagingvertexIndexBuffer, quadIndexBuffer, {{0, 0, sizeof(vertex_index_type) * maximum_number_of_indices}});
cmdEndDebugUtilsLabelEXT(commands);
commands.end();

hilet commandBuffersToSubmit = std::array{commands};
hilet submitInfo = vk::SubmitInfo{
    0,
    nullptr,
    nullptr,
    narrow_cast<uint32_t>(commandBuffersToSubmit.size()),
    commandBuffersToSubmit.data(),
    0,
    nullptr};
queue.queue.submit(submitInfo, vk::Fence());
queue.queue.waitIdle();

freeCommandBuffers(queue.command_pool, {commands});
destroyBuffer(stagingvertexIndexBuffer, stagingvertexIndexBufferAllocation);
void destroy_quad_index_buffer()

destroyBuffer(quadIndexBuffer, quadIndexBufferAllocation);