HikoGUI
A low latency retained GUI
Loading...
Searching...
No Matches
gfx_surface_vulkan_impl.hpp
1// Copyright Take Vos 2019-2022.
2// Distributed under the Boost Software License, Version 1.0.
3// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
4
5#pragma once
6
7#include "gfx_surface_vulkan.hpp"
8#include "gfx_surface_delegate_vulkan.hpp"
9#include "gfx_system_vulkan.hpp"
10#include "gfx_device_vulkan_impl.hpp"
11#include "gfx_pipeline_box_vulkan.hpp"
12#include "gfx_pipeline_image_vulkan.hpp"
13#include "gfx_pipeline_SDF_vulkan.hpp"
14#include "gfx_pipeline_alpha_vulkan.hpp"
15#include "gfx_pipeline_tone_mapper_vulkan.hpp"
16#include "../telemetry/telemetry.hpp"
17#include "../utility/utility.hpp"
18#include "../macros.hpp"
19#include <vector>
20
21namespace hi::inline v1 {
22
23inline void gfx_surface::set_device(gfx_device *new_device) noexcept
24{
25 hi_assert_not_null(new_device);
26
27 hilet lock = std::scoped_lock(gfx_system_mutex);
28
29 hi_axiom(gfx_system_mutex.recurse_lock_count());
30
31 if (_device == new_device) {
32 return;
33 }
34
35 if (_device) {
36 loss = gfx_surface_loss::device_lost;
37 teardown();
38 }
39
40 _device = new_device;
41
42 _present_queue = std::addressof(_device->get_present_queue(intrinsic));
43 _graphics_queue = std::addressof(_device->get_graphics_queue(intrinsic));
44}
45
46inline void gfx_surface::add_delegate(gfx_surface_delegate *delegate) noexcept
47{
48 hilet lock = std::scoped_lock(gfx_system_mutex);
49
50 hi_assert_not_null(delegate);
51 auto& delegate_info = _delegates.emplace_back(delegate, _device->createSemaphore());
52
53 if (state >= gfx_surface_state::has_device) {
54 auto& graphics_queue = _device->get_graphics_queue(intrinsic);
55
56 delegate_info.delegate->build_for_new_device(
57 _device->allocator, vulkan_instance(), _device->intrinsic, graphics_queue.queue, graphics_queue.family_queue_index);
58 }
59 if (state >= gfx_surface_state::has_swapchain) {
61 image_views.reserve(swapchain_image_infos.size());
62 for (hilet& image_info : swapchain_image_infos) {
63 image_views.push_back(image_info.image_view);
64 }
65
66 delegate_info.delegate->build_for_new_swapchain(image_views, swapchainImageExtent, swapchainImageFormat);
67 }
68}
69
70inline void gfx_surface::remove_delegate(gfx_surface_delegate *delegate) noexcept
71{
72 hilet lock = std::scoped_lock(gfx_system_mutex);
73
74 hi_assert_not_null(delegate);
75 auto it = std::find_if(_delegates.begin(), _delegates.end(), [delegate](hilet& item) {
76 return item.delegate == delegate;
77 });
78
79 if (state >= gfx_surface_state::has_swapchain) {
80 it->delegate->teardown_for_swapchain_lost();
81 }
82 if (state >= gfx_surface_state::has_device) {
83 it->delegate->teardown_for_device_lost();
84 }
85
86 _device->destroy(it->semaphore);
87
88 _delegates.erase(it);
89}
90
91[[nodiscard]] inline extent2 gfx_surface::size() const noexcept
92{
93 return {narrow_cast<float>(swapchainImageExtent.width), narrow_cast<float>(swapchainImageExtent.height)};
94}
95
96inline void gfx_surface::wait_idle()
97{
98 hi_axiom(gfx_system_mutex.recurse_lock_count());
99
100 hi_assert(_device);
101 if (renderFinishedFence) {
102 _device->waitForFences({renderFinishedFence}, VK_TRUE, std::numeric_limits<uint64_t>::max());
103 }
104 _device->waitIdle();
105 hi_log_info("/waitIdle");
106}
107
/** Try to acquire the next swapchain image without blocking.
 *
 * Uses a zero timeout, so the call returns immediately. Loss of the
 * swapchain or surface is recorded in `loss` instead of being thrown.
 *
 * @return The index of the acquired swapchain image, or std::nullopt when no
 *         image is available or the swapchain/surface was lost.
 * @throws gui_error on an unexpected Vulkan result.
 */
inline std::optional<uint32_t> gfx_surface::acquire_next_image_from_swapchain()
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // The swapchain, fence and image-available semaphore must be externally synchronized.
    uint32_t image_index = 0;

    hilet result = _device->acquireNextImageKHR(swapchain, 0, imageAvailableSemaphore, vk::Fence(), &image_index);

    switch (result) {
    case vk::Result::eSuccess:
        return image_index;

    case vk::Result::eSuboptimalKHR:
        // An image was technically acquired, but the surface no longer matches
        // the swapchain exactly. Treat it as a lost swapchain; the swapchain
        // will be rebuilt anyway, so skipping this image is harmless.
        hi_log_info("acquireNextImageKHR() eSuboptimalKHR");
        loss = gfx_surface_loss::swapchain_lost;
        return std::nullopt;

    case vk::Result::eNotReady:
        // No image acquired; expected with a zero timeout.
        // Rendering is retried at the next vertical sync.
        return std::nullopt;

    case vk::Result::eTimeout:
        // No image acquired. Unexpected since no timeout was set;
        // rendering is retried at the next vertical sync.
        hi_log_info("acquireNextImageKHR() eTimeout");
        return std::nullopt;

    case vk::Result::eErrorOutOfDateKHR:
        hi_log_info("acquireNextImageKHR() eErrorOutOfDateKHR");
        loss = gfx_surface_loss::swapchain_lost;
        return std::nullopt;

    case vk::Result::eErrorSurfaceLostKHR:
        hi_log_info("acquireNextImageKHR() eErrorSurfaceLostKHR");
        loss = gfx_surface_loss::window_lost;
        return std::nullopt;

    default:
        throw gui_error(std::format("Unknown result from acquireNextImageKHR(). '{}'", to_string(result)));
    }
}
158
/** Present a rendered swapchain image on the present queue.
 *
 * Loss of the swapchain or surface is recorded in `loss`; Vulkan-Hpp may
 * report these either as a result code or as an exception, both are handled.
 *
 * @param frameBufferIndex The index of the swapchain image to present.
 * @param semaphore The semaphore to wait on before presenting.
 * @throws gui_error on an unexpected Vulkan result.
 */
inline void gfx_surface::present_image_to_queue(uint32_t frameBufferIndex, vk::Semaphore semaphore)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    hi_assert_not_null(_device);

    // Fix: the declarations of the present arrays were missing from this block.
    hilet presentSwapchains = std::array{swapchain};
    hilet presentImageIndices = std::array{frameBufferIndex};
    hi_assert(presentSwapchains.size() == presentImageIndices.size());

    try {
        // Fix: the vk::PresentInfoKHR argument head was missing; order is
        // waitSemaphoreCount, pWaitSemaphores, swapchainCount, pSwapchains, pImageIndices.
        hilet result = _present_queue->queue.presentKHR(
            {1,
             &semaphore,
             narrow_cast<uint32_t>(presentSwapchains.size()),
             presentSwapchains.data(),
             presentImageIndices.data()});

        switch (result) {
        case vk::Result::eSuccess:
            return;

        case vk::Result::eSuboptimalKHR:
            hi_log_info("presentKHR() eSuboptimalKHR");
            loss = gfx_surface_loss::swapchain_lost;
            return;

        default:
            throw gui_error(std::format("Unknown result from presentKHR(). '{}'", to_string(result)));
        }

    } catch (vk::OutOfDateKHRError const&) {
        hi_log_info("presentKHR() eErrorOutOfDateKHR");
        loss = gfx_surface_loss::swapchain_lost;
        return;

    } catch (vk::SurfaceLostKHRError const&) {
        hi_log_info("presentKHR() eErrorSurfaceLostKHR");
        loss = gfx_surface_loss::window_lost;
        return;
    }
}
203
204inline gfx_surface_loss gfx_surface::build_for_new_device() noexcept
205{
206 if (_device->score(intrinsic) <= 0) {
207 return gfx_surface_loss::device_lost;
208 }
209
210 box_pipeline->build_for_new_device();
211 image_pipeline->build_for_new_device();
212 SDF_pipeline->build_for_new_device();
213 alpha_pipeline->build_for_new_device();
214 tone_mapper_pipeline->build_for_new_device();
215
216 auto& graphics_queue = _device->get_graphics_queue(intrinsic);
217 for (auto [delegate, semaphore] : _delegates) {
218 hi_assert_not_null(delegate);
219
220 delegate->build_for_new_device(
221 _device->allocator, vulkan_instance(), _device->intrinsic, graphics_queue.queue, graphics_queue.family_queue_index);
222 }
223
224 return gfx_surface_loss::none;
225}
226
227inline gfx_surface_loss gfx_surface::build_for_new_swapchain(extent2 new_size) noexcept
228{
229 try {
230 hilet[clamped_count, clamped_size] = get_image_count_and_size(defaultNumberOfSwapchainImages, new_size);
231 if (not new_size) {
232 // Minimized window, can not build a new swap chain.
233 return gfx_surface_loss::swapchain_lost;
234 }
235
236 if (loss = build_swapchain(clamped_count, clamped_size); loss != gfx_surface_loss::none) {
237 return loss;
238 }
239
240 hilet[clamped_count_check, clamped_size_check] = get_image_count_and_size(clamped_count, clamped_size);
242 // Window has changed during swap chain creation, it is in a inconsistent bad state.
243 // This is a bug in the Vulkan specification.
244 teardown_swapchain();
245 return gfx_surface_loss::swapchain_lost;
246 }
247
248 build_render_passes(); // Render-pass requires the swapchain/color/depth image-format.
249 build_frame_buffers(); // Framebuffer required render passes.
250 build_command_buffers();
251 build_semaphores();
252 hi_assert_not_null(box_pipeline);
253 hi_assert_not_null(image_pipeline);
254 hi_assert_not_null(SDF_pipeline);
255 hi_assert_not_null(alpha_pipeline);
256 hi_assert_not_null(tone_mapper_pipeline);
257 box_pipeline->build_for_new_swapchain(renderPass, 0, swapchainImageExtent);
258 image_pipeline->build_for_new_swapchain(renderPass, 1, swapchainImageExtent);
259 SDF_pipeline->build_for_new_swapchain(renderPass, 2, swapchainImageExtent);
260 alpha_pipeline->build_for_new_swapchain(renderPass, 3, swapchainImageExtent);
261 tone_mapper_pipeline->build_for_new_swapchain(renderPass, 4, swapchainImageExtent);
262
264 image_views.reserve(swapchain_image_infos.size());
265 for (hilet& image_info : swapchain_image_infos) {
266 image_views.push_back(image_info.image_view);
267 }
268
269 for (auto [delegate, semaphore] : _delegates) {
270 hi_assert_not_null(delegate);
271 delegate->build_for_new_swapchain(image_views, swapchainImageExtent, swapchainImageFormat);
272 }
273
274 return gfx_surface_loss::none;
275
276 } catch (vk::SurfaceLostKHRError const&) {
277 // During swapchain build we lost the surface.
278 // This state will cause the swapchain to be teardown.
279 return gfx_surface_loss::window_lost;
280 }
281}
282
283inline void gfx_surface::build(extent2 new_size) noexcept
284{
285 hi_axiom(gfx_system_mutex.recurse_lock_count());
286 hi_assert(loss == gfx_surface_loss::none);
287
288 if (state == gfx_surface_state::has_window) {
289 if (_device) {
290 if (loss = build_for_new_device(); loss != gfx_surface_loss::none) {
291 return;
292 }
293 state = gfx_surface_state::has_device;
294 }
295 }
296
297 if (state == gfx_surface_state::has_device) {
298 if (hilet tmp = build_for_new_swapchain(new_size); tmp == gfx_surface_loss::swapchain_lost) {
299 // No new swapchain was created, state has_device is maintained.
300 return;
301
302 } else if (loss = tmp; tmp != gfx_surface_loss::none) {
303 return;
304 }
305
306 state = gfx_surface_state::has_swapchain;
307 }
308}
309
/** Tear down everything that depends on the swapchain.
 *
 * Called while holding gfx_system_mutex after the swapchain was lost (resize,
 * out-of-date, suboptimal); afterwards the surface is in the has_device state.
 */
inline void gfx_surface::teardown_for_swapchain_lost() noexcept
{
    hi_log_info("Tearing down because the window lost the swapchain.");
    // Make sure the GPU no longer uses any of the objects destroyed below.
    wait_idle();

    // Let the external delegates release their per-swapchain resources first.
    for (auto [delegate, semaphore] : _delegates) {
        hi_assert_not_null(delegate);
        delegate->teardown_for_swapchain_lost();
    }

    // Tear down the built-in pipelines and surface-owned objects in the
    // reverse order of build_for_new_swapchain().
    tone_mapper_pipeline->teardown_for_swapchain_lost();
    alpha_pipeline->teardown_for_swapchain_lost();
    SDF_pipeline->teardown_for_swapchain_lost();
    image_pipeline->teardown_for_swapchain_lost();
    box_pipeline->teardown_for_swapchain_lost();
    teardown_semaphores();
    teardown_command_buffers();
    teardown_frame_buffers();
    teardown_render_passes();
    teardown_swapchain();
}
331
/** Tear down everything that depends on the Vulkan device.
 *
 * Called after the per-swapchain teardown; afterwards `_device` is cleared and
 * the surface is in the has_window state.
 */
inline void gfx_surface::teardown_for_device_lost() noexcept
{
    hi_log_info("Tearing down because the window lost the vulkan device.");
    // Let the external delegates release their per-device resources first.
    for (auto [delegate, semaphore] : _delegates) {
        hi_assert_not_null(delegate);
        delegate->teardown_for_device_lost();
    }
    // Tear down the built-in pipelines in the reverse order of build_for_new_device().
    tone_mapper_pipeline->teardown_for_device_lost();
    alpha_pipeline->teardown_for_device_lost();
    SDF_pipeline->teardown_for_device_lost();
    image_pipeline->teardown_for_device_lost();
    box_pipeline->teardown_for_device_lost();
    _device = nullptr;
}
346
/** Destroy the Vulkan surface itself.
 *
 * Called when the OS window no longer exists; afterwards the surface is in
 * the no_window state and can not be rebuilt.
 */
inline void gfx_surface::teardown_for_window_lost() noexcept
{
    gfx_system::global().destroySurfaceKHR(intrinsic);
}
351
/** Tear down exactly the Vulkan objects invalidated by the current `loss`.
 *
 * The teardown cascades through the states: a swapchain loss drops back to
 * has_device, a device loss drops further to has_window, a window loss drops
 * to no_window. Afterwards `loss` is reset to none.
 */
inline void gfx_surface::teardown() noexcept
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // swapchain_lost or worse: drop the swapchain, fall back to has_device.
    if (state == gfx_surface_state::has_swapchain and loss >= gfx_surface_loss::swapchain_lost) {
        teardown_for_swapchain_lost();
        state = gfx_surface_state::has_device;
    }

    // device_lost or worse: drop the device, fall back to has_window.
    if (state == gfx_surface_state::has_device and loss >= gfx_surface_loss::device_lost) {
        teardown_for_device_lost();
        state = gfx_surface_state::has_window;
    }

    // window_lost: destroy the surface itself.
    if (state == gfx_surface_state::has_window and loss >= gfx_surface_loss::window_lost) {
        hi_log_info("Tearing down because the window doesn't exist anymore.");
        teardown_for_window_lost();
        state = gfx_surface_state::no_window;
    }
    loss = gfx_surface_loss::none;
}
373
374inline void gfx_surface::update(extent2 new_size) noexcept
375{
376 hilet lock = std::scoped_lock(gfx_system_mutex);
377
378 if (size() != new_size and state == gfx_surface_state::has_swapchain) {
379 // On resize lose the swapchain, which will be cleaned up at teardown().
380 loss = gfx_surface_loss::swapchain_lost;
381 }
382
383 // Tear down then buildup from the Vulkan objects that where invalid.
384 teardown();
385 build(new_size);
386}
387
388inline draw_context gfx_surface::render_start(aarectangle redraw_rectangle)
389{
390 // Extent the redraw_rectangle to the render-area-granularity to improve performance on tile based GPUs.
391 redraw_rectangle = ceil(redraw_rectangle, _render_area_granularity);
392
393 hilet lock = std::scoped_lock(gfx_system_mutex);
394
395 auto r = draw_context{
396 *_device,
397 box_pipeline->vertexBufferData,
398 image_pipeline->vertexBufferData,
399 SDF_pipeline->vertexBufferData,
400 alpha_pipeline->vertexBufferData};
401
402 // Bail out when the window is not yet ready to be rendered, or if there is nothing to render.
403 if (state != gfx_surface_state::has_swapchain or not redraw_rectangle) {
404 return r;
405 }
406
407 hilet optional_frame_buffer_index = acquire_next_image_from_swapchain();
409 // No image is ready to be rendered, yet, possibly because our vertical sync function
410 // is not working correctly.
411 return r;
412 }
413
414 // Setting the frame buffer index, also enabled the draw_context.
415 r.frame_buffer_index = narrow_cast<size_t>(*optional_frame_buffer_index);
416
417 // Record which part of the image will be redrawn on the current swapchain image.
418 auto& current_image = swapchain_image_infos.at(r.frame_buffer_index);
419 current_image.redraw_rectangle = redraw_rectangle;
420
421 // Calculate the scissor rectangle, from the combined redraws of the complete swapchain.
422 // We need to do this so that old redraws are also executed in the current swapchain image.
423 r.scissor_rectangle =
424 std::accumulate(swapchain_image_infos.cbegin(), swapchain_image_infos.cend(), aarectangle{}, [](hilet& sum, hilet& item) {
425 return sum | item.redraw_rectangle;
426 });
427
428 // Wait until previous rendering has finished, before the next rendering.
429 _device->waitForFences({renderFinishedFence}, VK_TRUE, std::numeric_limits<uint64_t>::max());
430
431 // Unsignal the fence so we will not modify/destroy the command buffers during rendering.
432 _device->resetFences({renderFinishedFence});
433
434 return r;
435}
436
437inline void gfx_surface::render_finish(draw_context const& context)
438{
439 hilet lock = std::scoped_lock(gfx_system_mutex);
440
441 auto& current_image = swapchain_image_infos.at(context.frame_buffer_index);
442
443 // Because we use a scissor/render_area, the image from the swapchain around the scissor-area is reused.
444 // Because of reuse the swapchain image must already be in the "ePresentSrcKHR" layout.
445 // The swapchain creates images in undefined layout, so we need to change the layout once.
446 if (not current_image.layout_is_present) {
447 _device->transition_layout(
448 current_image.image, swapchainImageFormat.format, vk::ImageLayout::eUndefined, vk::ImageLayout::ePresentSrcKHR);
449
450 current_image.layout_is_present = true;
451 }
452
453 // Clamp the scissor rectangle to the size of the window.
454 hilet clamped_scissor_rectangle = intersect(
455 context.scissor_rectangle,
456 aarectangle{0, 0, narrow_cast<float>(swapchainImageExtent.width), narrow_cast<float>(swapchainImageExtent.height)});
457
458 hilet render_area = vk::Rect2D{
459 vk::Offset2D(
462 swapchainImageExtent.height - clamped_scissor_rectangle.bottom() - clamped_scissor_rectangle.height())),
463 vk::Extent2D(
465
466 // Start the first delegate when the swapchain-image becomes available.
467 auto start_semaphore = imageAvailableSemaphore;
468 for (auto [delegate, end_semaphore] : _delegates) {
469 hi_assert_not_null(delegate);
470
471 delegate->draw(narrow_cast<uint32_t>(context.frame_buffer_index), start_semaphore, end_semaphore, render_area);
473 }
474
475 // Wait for the semaphore of the last delegate before it will write into the swapchain-image.
476 fill_command_buffer(current_image, context, render_area);
477 submit_command_buffer(start_semaphore);
478
479 // Signal the fence when all rendering has finished on the graphics queue.
480 // When the fence is signaled we can modify/destroy the command buffers.
481 [[maybe_unused]] hilet submit_result = _graphics_queue->queue.submit(0, nullptr, renderFinishedFence);
482
483 present_image_to_queue(narrow_cast<uint32_t>(context.frame_buffer_index), renderFinishedSemaphore);
484
485 // Do an early tear down of invalid vulkan objects.
486 teardown();
487}
488
489inline void gfx_surface::fill_command_buffer(
490 swapchain_image_info const& current_image,
491 draw_context const& context,
492 vk::Rect2D render_area)
493{
494 hi_axiom(gfx_system_mutex.recurse_lock_count());
495
496 auto t = trace<"fill_command_buffer">{};
497
498 commandBuffer.reset(vk::CommandBufferResetFlagBits::eReleaseResources);
499 commandBuffer.begin({vk::CommandBufferUsageFlagBits::eSimultaneousUse});
500
501 hilet background_color_f32x4 = f32x4{1.0f, 0.0f, 0.0f, 1.0f};
503
504 hilet colorClearValue = vk::ClearColorValue{background_color_array};
505 hilet sdfClearValue = vk::ClearColorValue{std::array{0.0f, 0.0f, 0.0f, 0.0f}};
506 hilet depthClearValue = vk::ClearDepthStencilValue{0.0, 0};
507 hilet clearValues = std::array{
508 vk::ClearValue{depthClearValue},
509 vk::ClearValue{colorClearValue},
510 vk::ClearValue{sdfClearValue},
511 vk::ClearValue{colorClearValue}};
512
513 // The scissor and render area makes sure that the frame buffer is not modified where we are not drawing the widgets.
515 commandBuffer.setScissor(0, scissors);
516
517 commandBuffer.beginRenderPass(
518 {renderPass, current_image.frame_buffer, render_area, narrow_cast<uint32_t>(clearValues.size()), clearValues.data()},
519 vk::SubpassContents::eInline);
520
521 box_pipeline->draw_in_command_buffer(commandBuffer, context);
522 commandBuffer.nextSubpass(vk::SubpassContents::eInline);
523 image_pipeline->draw_in_command_buffer(commandBuffer, context);
524 commandBuffer.nextSubpass(vk::SubpassContents::eInline);
525 SDF_pipeline->draw_in_command_buffer(commandBuffer, context);
526 commandBuffer.nextSubpass(vk::SubpassContents::eInline);
527 alpha_pipeline->draw_in_command_buffer(commandBuffer, context);
528 commandBuffer.nextSubpass(vk::SubpassContents::eInline);
529 tone_mapper_pipeline->draw_in_command_buffer(commandBuffer, context);
530
531 commandBuffer.endRenderPass();
532 commandBuffer.end();
533}
534
535inline void gfx_surface::submit_command_buffer(vk::Semaphore delegate_semaphore)
536{
537 hi_axiom(gfx_system_mutex.recurse_lock_count());
538
540
541 hilet waitStages = std::array{vk::PipelineStageFlags{vk::PipelineStageFlagBits::eColorAttachmentOutput}};
542
543 hi_assert(waitSemaphores.size() == waitStages.size());
544
545 hilet signalSemaphores = std::array{renderFinishedSemaphore};
546 hilet commandBuffersToSubmit = std::array{commandBuffer};
547
548 hilet submitInfo = std::array{vk::SubmitInfo{
551 waitStages.data(),
555 signalSemaphores.data()}};
556
557 _graphics_queue->queue.submit(submitInfo, vk::Fence());
558}
559
560inline std::tuple<std::size_t, extent2> gfx_surface::get_image_count_and_size(std::size_t new_count, extent2 new_size)
561{
562 hi_axiom(gfx_system_mutex.recurse_lock_count());
563
564 hilet surfaceCapabilities = _device->getSurfaceCapabilitiesKHR(intrinsic);
565
567 hilet max_count = narrow_cast<std::size_t>(surfaceCapabilities.maxImageCount ? surfaceCapabilities.maxImageCount : 3);
568 hilet clamped_count = std::clamp(new_count, min_count, max_count);
569 hi_log_info(
570 "gfx_surface min_count={}, max_count={}, requested_count={}, count={}", min_count, max_count, new_count, clamped_count);
571
572 // minImageExtent and maxImageExtent are always valid. currentImageExtent may be 0xffffffff.
573 hilet min_size = extent2{
574 narrow_cast<float>(surfaceCapabilities.minImageExtent.width),
575 narrow_cast<float>(surfaceCapabilities.minImageExtent.height)};
576 hilet max_size = extent2{
577 narrow_cast<float>(surfaceCapabilities.maxImageExtent.width),
578 narrow_cast<float>(surfaceCapabilities.maxImageExtent.height)};
579 hilet clamped_size = clamp(new_size, min_size, max_size);
580
581 hi_log_info("gfx_surface min_size={}, max_size={}, requested_size={}, size={}", min_size, max_size, new_size, clamped_size);
582 return {clamped_count, clamped_size};
583}
584
/** Build the Vulkan swapchain and the matching depth and color attachment images.
 *
 * @param new_count The number of swapchain images to create.
 * @param new_size The size of the swapchain images.
 * @return gfx_surface_loss::none on success, window_lost when the surface
 *         disappeared during creation.
 * @throws gui_error on an unexpected result from createSwapchainKHR().
 */
inline gfx_surface_loss gfx_surface::build_swapchain(std::size_t new_count, extent2 new_size)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    hi_log_info("Building swap chain");

    // Exclusive sharing when a single queue does both graphics and present.
    hilet sharingMode = _graphics_queue == _present_queue ? vk::SharingMode::eExclusive : vk::SharingMode::eConcurrent;

    // NOTE(review): the declaration line of the queue-family-index array
    // (presumably `hilet sharingQueueFamilyAllIndices = std::array{`) is
    // missing from this extraction; only the initializer continuation remains.
        _graphics_queue->family_queue_index, _present_queue->family_queue_index};

    swapchainImageFormat = _device->get_surface_format(intrinsic);
    nrSwapchainImages = narrow_cast<uint32_t>(new_count);
    swapchainImageExtent = VkExtent2D{round_cast<uint32_t>(new_size.width()), round_cast<uint32_t>(new_size.height())};
    vk::SwapchainCreateInfoKHR swapchainCreateInfo{
        vk::SwapchainCreateFlagsKHR(),
        intrinsic,
        nrSwapchainImages,
        swapchainImageFormat.format,
        swapchainImageFormat.colorSpace,
        swapchainImageExtent,
        1, // imageArrayLayers
        vk::ImageUsageFlagBits::eColorAttachment,
        // NOTE(review): the imageSharingMode argument line is missing here.
        sharingMode == vk::SharingMode::eConcurrent ? narrow_cast<uint32_t>(sharingQueueFamilyAllIndices.size()) : 0,
        // NOTE(review): the pQueueFamilyIndices, preTransform and
        // compositeAlpha argument lines are missing here — restore from upstream.
        _device->get_present_mode(intrinsic),
        VK_TRUE, // clipped
        nullptr};

    vk::Result const result = _device->createSwapchainKHR(&swapchainCreateInfo, nullptr, &swapchain);
    switch (result) {
    case vk::Result::eSuccess:
        break;

    case vk::Result::eErrorSurfaceLostKHR:
        return gfx_surface_loss::window_lost;

    default:
        throw gui_error(std::format("Unknown result from createSwapchainKHR(). '{}'", to_string(result)));
    }

    hi_log_info("Finished building swap chain");
    hi_log_info(" - extent=({}, {})", swapchainCreateInfo.imageExtent.width, swapchainCreateInfo.imageExtent.height);
    hi_log_info(
        " - colorSpace={}, format={}",
        vk::to_string(swapchainCreateInfo.imageColorSpace),
        vk::to_string(swapchainCreateInfo.imageFormat));
    hi_log_info(
        " - presentMode={}, imageCount={}", vk::to_string(swapchainCreateInfo.presentMode), swapchainCreateInfo.minImageCount);

    // Create depth matching the swapchain.
    vk::ImageCreateInfo const depthImageCreateInfo = {
        vk::ImageCreateFlags(),
        vk::ImageType::e2D,
        depthImageFormat,
        vk::Extent3D(swapchainCreateInfo.imageExtent.width, swapchainCreateInfo.imageExtent.height, 1),
        1, // mipLevels
        1, // arrayLayers
        vk::SampleCountFlagBits::e1,
        vk::ImageTiling::eOptimal,
        vk::ImageUsageFlagBits::eDepthStencilAttachment | _device->transientImageUsageFlags,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
        vk::ImageLayout::eUndefined};

    // NOTE(review): the declaration/initialization lines of
    // depthAllocationCreateInfo (a VmaAllocationCreateInfo) are missing here.
    depthAllocationCreateInfo.pUserData = const_cast<char *>("vk::Image depth attachment");
    depthAllocationCreateInfo.usage = _device->lazyMemoryUsage;
    std::tie(depthImage, depthImageAllocation) = _device->createImage(depthImageCreateInfo, depthAllocationCreateInfo);
    _device->setDebugUtilsObjectNameEXT(depthImage, "vk::Image depth attachment");

    // Create color image matching the swapchain.
    vk::ImageCreateInfo const colorImageCreateInfo = {
        vk::ImageCreateFlags(),
        vk::ImageType::e2D,
        colorImageFormat,
        vk::Extent3D(swapchainCreateInfo.imageExtent.width, swapchainCreateInfo.imageExtent.height, 1),
        1, // mipLevels
        1, // arrayLayers
        vk::SampleCountFlagBits::e1,
        vk::ImageTiling::eOptimal,
        vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eInputAttachment | _device->transientImageUsageFlags,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
        vk::ImageLayout::eUndefined};

    // NOTE(review): the declaration/initialization lines of
    // colorAllocationCreateInfo are missing here.
    colorAllocationCreateInfo.pUserData = const_cast<char *>("vk::Image color attachment");
    colorAllocationCreateInfo.usage = _device->lazyMemoryUsage;

    std::tie(colorImages[0], colorImageAllocations[0]) = _device->createImage(colorImageCreateInfo, colorAllocationCreateInfo);
    _device->setDebugUtilsObjectNameEXT(colorImages[0], "vk::Image color attachment");

    return gfx_surface_loss::none;
}
687
688inline void gfx_surface::teardown_swapchain()
689{
690 hi_axiom(gfx_system_mutex.recurse_lock_count());
691
692 _device->destroy(swapchain);
693 _device->destroyImage(depthImage, depthImageAllocation);
694
695 for (std::size_t i = 0; i != colorImages.size(); ++i) {
696 _device->destroyImage(colorImages[i], colorImageAllocations[i]);
697 }
698}
699
700inline void gfx_surface::build_frame_buffers()
701{
702 hi_axiom(gfx_system_mutex.recurse_lock_count());
703
704 depthImageView = _device->createImageView(
705 {vk::ImageViewCreateFlags(),
706 depthImage,
707 vk::ImageViewType::e2D,
708 depthImageFormat,
709 vk::ComponentMapping(),
710 {vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1}});
711
712 for (std::size_t i = 0; i != colorImageViews.size(); ++i) {
713 colorImageViews[i] = _device->createImageView(
714 {vk::ImageViewCreateFlags(),
715 colorImages[i],
716 vk::ImageViewType::e2D,
717 colorImageFormat,
718 vk::ComponentMapping(),
719 {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}});
720
721 colorDescriptorImageInfos[i] = {vk::Sampler(), colorImageViews[i], vk::ImageLayout::eShaderReadOnlyOptimal};
722 }
723
724 auto swapchain_images = _device->getSwapchainImagesKHR(swapchain);
725 for (auto image : swapchain_images) {
726 auto image_view = _device->createImageView(
727 {vk::ImageViewCreateFlags(),
728 image,
729 vk::ImageViewType::e2D,
730 swapchainImageFormat.format,
731 vk::ComponentMapping(),
732 {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}});
733
734 hilet attachments = std::array{depthImageView, colorImageViews[0], image_view};
735
736 hilet frame_buffer = _device->createFramebuffer({
737 vk::FramebufferCreateFlags(),
738 renderPass,
740 attachments.data(),
741 swapchainImageExtent.width,
742 swapchainImageExtent.height,
743 1 // layers
744 });
745
746 swapchain_image_infos.emplace_back(
747 std::move(image), std::move(image_view), std::move(frame_buffer), aarectangle{}, false);
748 }
749
750 hi_assert(swapchain_image_infos.size() == swapchain_images.size());
751}
752
753inline void gfx_surface::teardown_frame_buffers()
754{
755 hi_axiom(gfx_system_mutex.recurse_lock_count());
756
757 for (auto& info : swapchain_image_infos) {
758 _device->destroy(info.frame_buffer);
759 _device->destroy(info.image_view);
760 }
761 swapchain_image_infos.clear();
762
763 _device->destroy(depthImageView);
764 for (std::size_t i = 0; i != colorImageViews.size(); ++i) {
765 _device->destroy(colorImageViews[i]);
766 }
767}
768
/** Build the render-pass with five subpasses (box, image, SDF, alpha, tone-mapper).
 *
 * Attachments: 0 = depth, 1 = intermediate color, 2 = swapchain image.
 * The first four subpasses render into the color attachment; the tone-mapper
 * reads it as an input attachment and writes the swapchain image.
 * Also queries the render-area granularity used by render_start().
 */
inline void gfx_surface::build_render_passes()
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // NOTE(review): the declaration line of the attachment array (presumably
    // `hilet attachment_descriptions = std::array{`) is missing from this extraction.
        vk::AttachmentDescription{
            // Depth attachment
            vk::AttachmentDescriptionFlags(),
            depthImageFormat,
            vk::SampleCountFlagBits::e1,
            vk::AttachmentLoadOp::eClear,
            vk::AttachmentStoreOp::eDontCare,
            vk::AttachmentLoadOp::eDontCare, // stencilLoadOp
            vk::AttachmentStoreOp::eDontCare, // stencilStoreOp
            vk::ImageLayout::eUndefined, // initialLayout
            vk::ImageLayout::eDepthStencilAttachmentOptimal // finalLayout
        },
        vk::AttachmentDescription{
            // Color attachment
            vk::AttachmentDescriptionFlags(),
            colorImageFormat,
            vk::SampleCountFlagBits::e1,
            vk::AttachmentLoadOp::eClear,
            vk::AttachmentStoreOp::eDontCare,
            vk::AttachmentLoadOp::eDontCare, // stencilLoadOp
            vk::AttachmentStoreOp::eDontCare, // stencilStoreOp
            vk::ImageLayout::eUndefined, // initialLayout
            vk::ImageLayout::eColorAttachmentOptimal // finalLayout
        },
        vk::AttachmentDescription{
            // Swapchain attachment.
            vk::AttachmentDescriptionFlags(),
            swapchainImageFormat.format,
            vk::SampleCountFlagBits::e1,
            vk::AttachmentLoadOp::eLoad,
            vk::AttachmentStoreOp::eStore,
            vk::AttachmentLoadOp::eDontCare, // stencilLoadOp
            vk::AttachmentStoreOp::eDontCare, // stencilStoreOp
            vk::ImageLayout::ePresentSrcKHR, // initialLayout
            vk::ImageLayout::ePresentSrcKHR // finalLayout
        }};

    hilet depth_attachment_reference = vk::AttachmentReference{0, vk::ImageLayout::eDepthStencilAttachmentOptimal};
    hilet color_attachment_references = std::array{vk::AttachmentReference{1, vk::ImageLayout::eColorAttachmentOptimal}};
    hilet color_input_attachment_references = std::array{vk::AttachmentReference{1, vk::ImageLayout::eShaderReadOnlyOptimal}};
    hilet swapchain_attachment_references = std::array{vk::AttachmentReference{2, vk::ImageLayout::eColorAttachmentOptimal}};

    // NOTE(review): the declaration line of the subpass array (presumably
    // `hilet subpass_descriptions = std::array{`) and several attachment
    // count/pointer arguments inside each subpass are missing; gaps marked below.
        vk::SubpassDescription{
            vk::SubpassDescriptionFlags(), // Subpass 0 Box
            vk::PipelineBindPoint::eGraphics,
            0, // inputAttchmentReferencesCount
            nullptr, // inputAttachmentReferences
            // NOTE(review): color-attachment count/pointer and the
            // depth-stencil attachment arguments are missing here.
            nullptr, // resolveAttachments

        },
        vk::SubpassDescription{
            vk::SubpassDescriptionFlags(), // Subpass 1 Image
            vk::PipelineBindPoint::eGraphics,
            0, // inputAttchmentReferencesCount
            nullptr, // inputAttachmentReferences
            // NOTE(review): color-attachment count/pointer and the
            // depth-stencil attachment arguments are missing here.
            nullptr, // resolveAttachments

        },
        vk::SubpassDescription{
            vk::SubpassDescriptionFlags(), // Subpass 2 SDF
            vk::PipelineBindPoint::eGraphics,
            0,
            nullptr,
            // NOTE(review): color-attachment count/pointer and the
            // depth-stencil attachment arguments are missing here.
            nullptr, // resolveAttachments

        },
        vk::SubpassDescription{
            vk::SubpassDescriptionFlags(), // Subpass 3 alpha
            vk::PipelineBindPoint::eGraphics,
            0,
            nullptr,
            // NOTE(review): color-attachment count/pointer and the
            // depth-stencil attachment arguments are missing here.
            nullptr, // resolveAttachments

        },
        vk::SubpassDescription{
            vk::SubpassDescriptionFlags(), // Subpass 4 tone-mapper
            vk::PipelineBindPoint::eGraphics,
            // NOTE(review): the input-attachment (color as input) and
            // swapchain color-attachment arguments are missing here.
            nullptr,
            nullptr}};

    // NOTE(review): the declaration line of the dependency array and the
    // VK_SUBPASS_EXTERNAL source of the first dependency are missing here.
        vk::SubpassDependency{
            0,
            vk::PipelineStageFlagBits::eBottomOfPipe,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::AccessFlagBits::eMemoryRead,
            vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
            vk::DependencyFlagBits::eByRegion},
        // Subpass 0: Render shaded polygons to color+depth with fixed function alpha compositing
        vk::SubpassDependency{
            0,
            1,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::AccessFlagBits::eColorAttachmentWrite,
            vk::AccessFlagBits::eColorAttachmentRead,
            vk::DependencyFlagBits::eByRegion},
        // Subpass 1: Render texture mapped polygons to color+depth with fixed function alpha compositing
        vk::SubpassDependency{
            1,
            2,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::AccessFlagBits::eColorAttachmentWrite,
            vk::AccessFlagBits::eColorAttachmentRead,
            vk::DependencyFlagBits::eByRegion},
        // Subpass 2: Render SDF-texture mapped polygons to color+depth with fixed function alpha compositing
        vk::SubpassDependency{
            2,
            3,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::PipelineStageFlagBits::eFragmentShader,
            vk::AccessFlagBits::eColorAttachmentWrite,
            vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eInputAttachmentRead,
            vk::DependencyFlagBits::eByRegion},
        // Subpass 3: Render alpha polygons to color+depth with alpha override
        vk::SubpassDependency{
            3,
            4,
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::PipelineStageFlagBits::eFragmentShader,
            vk::AccessFlagBits::eColorAttachmentWrite,
            vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eInputAttachmentRead,
            vk::DependencyFlagBits::eByRegion},
        // Subpass 4: Tone mapping color to swapchain.
        vk::SubpassDependency{
            4,
            // NOTE(review): the dstSubpass (VK_SUBPASS_EXTERNAL) line is missing here.
            vk::PipelineStageFlagBits::eColorAttachmentOutput,
            vk::PipelineStageFlagBits::eBottomOfPipe,
            vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
            vk::AccessFlagBits::eMemoryRead,
            vk::DependencyFlagBits::eByRegion}};

    vk::RenderPassCreateInfo const render_pass_create_info = {
        vk::RenderPassCreateFlags(),
        narrow_cast<uint32_t>(attachment_descriptions.size()), // attachmentCount
        attachment_descriptions.data(), // attachments
        narrow_cast<uint32_t>(subpass_descriptions.size()), // subpassCount
        subpass_descriptions.data(), // subpasses
        narrow_cast<uint32_t>(subpass_dependency.size()), // dependencyCount
        subpass_dependency.data() // dependencies
    };

    renderPass = _device->createRenderPass(render_pass_create_info);
    // The granularity is used by render_start() to align redraw rectangles.
    hilet granularity = _device->getRenderAreaGranularity(renderPass);
    _render_area_granularity = extent2{narrow_cast<float>(granularity.width), narrow_cast<float>(granularity.height)};
}
952
inline void gfx_surface::teardown_render_passes()
{
    // Must be called while the global GUI mutex is held by this thread.
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // Destroy the render-pass created by build_render_passes().
    _device->destroy(renderPass);
}
959
inline void gfx_surface::build_semaphores()
{
    // Must be called while the global GUI mutex is held by this thread.
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // Semaphores to order swapchain-image acquisition and render completion
    // within a frame; destroyed again in teardown_semaphores().
    imageAvailableSemaphore = _device->createSemaphore();
    renderFinishedSemaphore = _device->createSemaphore();

    // This fence is used to wait for the Window and its Pipelines to be idle.
    // It should therefore be created signaled at the start so that when no rendering
    // has been done it is still idle.
    renderFinishedFence = _device->createFence({vk::FenceCreateFlagBits::eSignaled});
}
972
inline void gfx_surface::teardown_semaphores()
{
    // Must be called while the global GUI mutex is held by this thread.
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // Destroy the synchronization objects created by build_semaphores().
    _device->destroy(renderFinishedSemaphore);
    _device->destroy(imageAvailableSemaphore);
    _device->destroy(renderFinishedFence);
}
981
982inline void gfx_surface::build_command_buffers()
983{
984 hi_axiom(gfx_system_mutex.recurse_lock_count());
985
986 hilet commandBuffers = _device->allocateCommandBuffers({_graphics_queue->command_pool, vk::CommandBufferLevel::ePrimary, 1});
987
988 commandBuffer = commandBuffers.at(0);
989}
990
991inline void gfx_surface::teardown_command_buffers()
992{
993 hi_axiom(gfx_system_mutex.recurse_lock_count());
994 hilet commandBuffers = std::vector<vk::CommandBuffer>{commandBuffer};
995
996 _device->freeCommandBuffers(_graphics_queue->command_pool, commandBuffers);
997}
998
999[[nodiscard]] inline std::unique_ptr<gfx_surface> make_unique_gfx_surface(os_handle instance, void *os_window)
1000{
1001 hilet lock = std::scoped_lock(gfx_system_mutex);
1002
1003 auto surface_create_info = vk::Win32SurfaceCreateInfoKHR{
1004 vk::Win32SurfaceCreateFlagsKHR(), reinterpret_cast<HINSTANCE>(instance), reinterpret_cast<HWND>(os_window)};
1005
1006 auto vulkan_surface = vulkan_instance().createWin32SurfaceKHR(surface_create_info);
1007
1008 auto surface = std::make_unique<gfx_surface>(vulkan_surface);
1009
1010 // Now that we have a physical window and render surface it is time to find the gfx-device
1011 // for rendering on this surface.
1012 auto device = find_best_device_for_surface(surface->intrinsic);
1013 if (not device) {
1014 throw gfx_error("Could not find a vulkan-device matching this surface");
1015 }
1016 surface->set_device(device);
1017
1018 return surface;
1019}
1020
1021} // namespace hi::inline v1
DOXYGEN BUG.
Definition algorithm.hpp:16
unfair_recursive_mutex gfx_system_mutex
Global mutex for GUI elements, like gfx_system, gfx_device, Windows and Widgets.
Definition gfx_system_globals.hpp:18
gfx_surface_loss
Definition gfx_surface_state.hpp:20
constexpr Out narrow_cast(In const &rhs) noexcept
Cast numeric values without loss of precision.
Definition cast.hpp:377
Definition gfx_device_vulkan.hpp:24
A delegate for drawing on a window below the HikoGUI user interface.
Definition gfx_surface_delegate_vulkan.hpp:23
T accumulate(T... args)
T addressof(T... args)
T ceil(T... args)
T data(T... args)
T end(T... args)
T find_if(T... args)
T lock(T... args)
T max(T... args)
T move(T... args)
T reserve(T... args)
T tie(T... args)
T to_string(T... args)