HikoGUI
A low latency retained GUI
Loading...
Searching...
No Matches
gfx_surface_vulkan_impl.hpp
1// Copyright Take Vos 2019-2022.
2// Distributed under the Boost Software License, Version 1.0.
3// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
4
5#pragma once
6
7#include "gfx_surface_vulkan_intf.hpp"
8#include "gfx_surface_delegate_vulkan.hpp"
9#include "gfx_system_vulkan_intf.hpp"
10#include "gfx_device_vulkan_impl.hpp"
11#include "gfx_pipeline_box_vulkan_intf.hpp"
12#include "gfx_pipeline_image_vulkan_intf.hpp"
13#include "gfx_pipeline_SDF_vulkan_intf.hpp"
14#include "gfx_pipeline_override_vulkan_intf.hpp"
15#include "gfx_pipeline_tone_mapper_vulkan_intf.hpp"
16#include "../telemetry/telemetry.hpp"
17#include "../utility/utility.hpp"
18#include "../macros.hpp"
19#include <vector>
20#include <vulkan/vulkan.hpp>
21
22hi_export_module(hikogui.GFX : gfx_surface_impl);
23
24hi_export namespace hi::inline v1 {
25
/** Change the Vulkan device used to render to this surface.
 *
 * If a different device was previously set, all device-dependent objects are
 * torn down first (by marking the old device as lost), after which the new
 * device's present- and graphics-queues are cached.
 *
 * @param new_device The device to use for rendering; must not be null.
 */
inline void gfx_surface::set_device(gfx_device *new_device) noexcept
{
    hi_assert_not_null(new_device);

    auto const lock = std::scoped_lock(gfx_system_mutex);

    hi_axiom(gfx_system_mutex.recurse_lock_count());

    if (_device == new_device) {
        // Setting the same device again is a no-op.
        return;
    }

    if (_device) {
        // A different device was active: mark it lost so teardown() removes
        // everything that was created from the old device.
        loss = gfx_surface_loss::device_lost;
        teardown();
    }

    _device = new_device;

    // Cache pointers to the queues of the new device for this surface.
    _present_queue = std::addressof(_device->get_present_queue(intrinsic));
    _graphics_queue = std::addressof(_device->get_graphics_queue(intrinsic));
}
48
/** Add a delegate that renders into the swapchain images.
 *
 * A semaphore is created for the delegate so that delegate rendering and the
 * surface's own rendering can be chained in submission order.
 *
 * If the surface already has a device and/or a swapchain, the delegate is
 * immediately asked to build its device- and swapchain-dependent resources.
 *
 * @param delegate The delegate to add; must not be null.
 */
inline void gfx_surface::add_delegate(gfx_surface_delegate *delegate) noexcept
{
    auto const lock = std::scoped_lock(gfx_system_mutex);

    hi_assert_not_null(delegate);
    // NOTE(review): _device is dereferenced here before the state check below;
    // presumably delegates are only added after a device was set — confirm.
    auto& delegate_info = _delegates.emplace_back(delegate, _device->createSemaphore());

    if (state >= gfx_surface_state::has_device) {
        auto& graphics_queue = _device->get_graphics_queue(intrinsic);

        delegate_info.delegate->build_for_new_device(
            _device->allocator, vulkan_instance(), _device->intrinsic, graphics_queue.queue, graphics_queue.family_queue_index);
    }
    if (state >= gfx_surface_state::has_swapchain) {
        // Hand the delegate a copy of all swapchain image-views so it can
        // build its own frame-buffers.
        auto image_views = std::vector<vk::ImageView>{};
        image_views.reserve(swapchain_image_infos.size());
        for (auto const& image_info : swapchain_image_infos) {
            image_views.push_back(image_info.image_view);
        }

        delegate_info.delegate->build_for_new_swapchain(image_views, swapchainImageExtent, swapchainImageFormat);
    }
}
72
73inline void gfx_surface::remove_delegate(gfx_surface_delegate *delegate) noexcept
74{
75 auto const lock = std::scoped_lock(gfx_system_mutex);
76
77 hi_assert_not_null(delegate);
78 auto it = std::find_if(_delegates.begin(), _delegates.end(), [delegate](auto const& item) {
79 return item.delegate == delegate;
80 });
81
82 if (state >= gfx_surface_state::has_swapchain) {
83 it->delegate->teardown_for_swapchain_lost();
84 }
85 if (state >= gfx_surface_state::has_device) {
86 it->delegate->teardown_for_device_lost();
87 }
88
89 _device->destroy(it->semaphore);
90
91 _delegates.erase(it);
92}
93
94[[nodiscard]] inline extent2 gfx_surface::size() const noexcept
95{
96 return {narrow_cast<float>(swapchainImageExtent.width), narrow_cast<float>(swapchainImageExtent.height)};
97}
98
/** Block until the device has finished all work for this surface.
 *
 * First waits for the render-finished fence (if any), then waits for the
 * whole device to become idle. Must be called with gfx_system_mutex held.
 */
inline void gfx_surface::wait_idle()
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    hi_assert(_device);
    if (renderFinishedFence) {
        // NOTE(review): the vk::Result of waitForFences is ignored here;
        // waitIdle() below acts as the final synchronization point.
        _device->waitForFences({renderFinishedFence}, VK_TRUE, std::numeric_limits<uint64_t>::max());
    }
    _device->waitIdle();
    hi_log_info("/waitIdle");
}
110
/** Acquire the next swapchain image to render into.
 *
 * Uses a zero timeout, so this never blocks: when no image is ready the
 * function returns std::nullopt and the caller should try again after the
 * next vertical sync.
 *
 * @return The frame-buffer index of the acquired image, or std::nullopt when
 *         no image is available or the swapchain/window was lost (in which
 *         case @a loss is updated accordingly).
 * @throws gui_error On an unexpected result from acquireNextImageKHR().
 */
inline std::optional<uint32_t> gfx_surface::acquire_next_image_from_swapchain()
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // swap chain, fence & imageAvailableSemaphore must be externally synchronized.
    uint32_t frameBufferIndex = 0;
    // hi_log_debug("acquireNextImage '{}'", title);

    auto const result = _device->acquireNextImageKHR(swapchain, 0, imageAvailableSemaphore, vk::Fence(), &frameBufferIndex);
    // hi_log_debug("acquireNextImage {}", frameBufferIndex);

    switch (result) {
    case vk::Result::eSuccess:
        return {frameBufferIndex};

    case vk::Result::eSuboptimalKHR:
        // Technically we received an image here, but we treat it
        // as a swapchain-lost which should reset the swapchain anyway,
        // so not rendering to the image should be okay.
        hi_log_info("acquireNextImageKHR() eSuboptimalKHR");
        loss = gfx_surface_loss::swapchain_lost;
        return std::nullopt;

    case vk::Result::eNotReady:
        // Don't render, we didn't receive an image.
        // The timeout was zero, so we only expect eSuccess or eNotReady.
        // This will wait until the next vertical sync.
        return std::nullopt;

    case vk::Result::eTimeout:
        // Don't render, we didn't receive an image.
        // Since we did not set the timeout we don't expect this result.
        // This will wait until the next vertical sync.
        hi_log_info("acquireNextImageKHR() eTimeout");
        return std::nullopt;

    case vk::Result::eErrorOutOfDateKHR:
        // The swapchain no longer matches the surface; rebuild it.
        hi_log_info("acquireNextImageKHR() eErrorOutOfDateKHR");
        loss = gfx_surface_loss::swapchain_lost;
        return std::nullopt;

    case vk::Result::eErrorSurfaceLostKHR:
        // The surface itself is gone; the whole window is lost.
        hi_log_info("acquireNextImageKHR() eErrorSurfaceLostKHR");
        loss = gfx_surface_loss::window_lost;
        return std::nullopt;

    default:
        throw gui_error(std::format("Unknown result from acquireNextImageKHR(). '{}'", to_string(result)));
    }
}
161
/** Present a rendered swapchain image on the present-queue.
 *
 * Waits for @a semaphore before presentation. Loss of the swapchain or the
 * surface is recorded in @a loss (both via result codes and via the
 * exceptions thrown by the vulkan.hpp wrapper) instead of propagating.
 *
 * @param frameBufferIndex The index of the swapchain image to present.
 * @param semaphore The semaphore signaled when rendering to the image finished.
 * @throws gui_error On an unexpected result from presentKHR().
 */
inline void gfx_surface::present_image_to_queue(uint32_t frameBufferIndex, vk::Semaphore semaphore)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    hi_assert_not_null(_device);

    std::array<vk::Semaphore, 1> const renderFinishedSemaphores = {semaphore};
    std::array<vk::SwapchainKHR, 1> const presentSwapchains = {swapchain};
    std::array<uint32_t, 1> const presentImageIndices = {frameBufferIndex};
    hi_assert(presentSwapchains.size() == presentImageIndices.size());

    try {
        // hi_log_debug("presentQueue {}", presentImageIndices.at(0));
        auto const result = _present_queue->queue.presentKHR(
            {narrow_cast<uint32_t>(renderFinishedSemaphores.size()),
             renderFinishedSemaphores.data(),
             narrow_cast<uint32_t>(presentSwapchains.size()),
             presentSwapchains.data(),
             presentImageIndices.data()});

        switch (result) {
        case vk::Result::eSuccess:
            return;

        case vk::Result::eSuboptimalKHR:
            // The image was presented but the swapchain should be rebuilt.
            hi_log_info("presentKHR() eSuboptimalKHR");
            loss = gfx_surface_loss::swapchain_lost;
            return;

        default:
            throw gui_error(std::format("Unknown result from presentKHR(). '{}'", to_string(result)));
        }

    } catch (vk::OutOfDateKHRError const&) {
        // vulkan.hpp throws for eErrorOutOfDateKHR instead of returning it.
        hi_log_info("presentKHR() eErrorOutOfDateKHR");
        loss = gfx_surface_loss::swapchain_lost;
        return;

    } catch (vk::SurfaceLostKHRError const&) {
        // vulkan.hpp throws for eErrorSurfaceLostKHR instead of returning it.
        hi_log_info("presentKHR() eErrorSurfaceLostKHR");
        loss = gfx_surface_loss::window_lost;
        return;
    }
}
206
207inline gfx_surface_loss gfx_surface::build_for_new_device() noexcept
208{
209 if (_device->score(intrinsic) <= 0) {
210 return gfx_surface_loss::device_lost;
211 }
212
213 box_pipeline->build_for_new_device();
214 image_pipeline->build_for_new_device();
215 SDF_pipeline->build_for_new_device();
216 override_pipeline->build_for_new_device();
217 tone_mapper_pipeline->build_for_new_device();
218
219 auto& graphics_queue = _device->get_graphics_queue(intrinsic);
220 for (auto [delegate, semaphore] : _delegates) {
221 hi_assert_not_null(delegate);
222
223 delegate->build_for_new_device(
224 _device->allocator, vulkan_instance(), _device->intrinsic, graphics_queue.queue, graphics_queue.family_queue_index);
225 }
226
227 return gfx_surface_loss::none;
228}
229
/** Build the swapchain and everything that depends on it.
 *
 * Builds, in order: the swapchain, render-passes, frame-buffers,
 * command-buffers, semaphores and the per-swapchain state of each pipeline,
 * then informs the delegates about the new swapchain.
 *
 * @param new_size The requested size of the swapchain images in pixels.
 * @return gfx_surface_loss::none on success, swapchain_lost when the
 *         swapchain could not be (consistently) created, window_lost when
 *         the surface disappeared during creation.
 */
inline gfx_surface_loss gfx_surface::build_for_new_swapchain(extent2 new_size) noexcept
{
    try {
        auto const[clamped_count, clamped_size] = get_image_count_and_size(defaultNumberOfSwapchainImages, new_size);
        if (not new_size) {
            // Minimized window, can not build a new swap chain.
            return gfx_surface_loss::swapchain_lost;
        }

        if (loss = build_swapchain(clamped_count, clamped_size); loss != gfx_surface_loss::none) {
            return loss;
        }

        // Re-query the surface capabilities to detect a resize that happened
        // while the swapchain was being created.
        auto const[clamped_count_check, clamped_size_check] = get_image_count_and_size(clamped_count, clamped_size);
        if (clamped_count_check != clamped_count or clamped_size_check != clamped_size) {
            // Window has changed during swap chain creation, it is in an inconsistent bad state.
            // This is a bug in the Vulkan specification.
            teardown_swapchain();
            return gfx_surface_loss::swapchain_lost;
        }

        build_render_passes(); // Render-pass requires the swapchain/color/depth image-format.
        build_frame_buffers(); // Framebuffer required render passes.
        build_command_buffers();
        build_semaphores();
        hi_assert_not_null(box_pipeline);
        hi_assert_not_null(image_pipeline);
        hi_assert_not_null(SDF_pipeline);
        hi_assert_not_null(override_pipeline);
        hi_assert_not_null(tone_mapper_pipeline);
        // Each pipeline is bound to its own subpass index (0-4) of the render-pass.
        box_pipeline->build_for_new_swapchain(renderPass, 0, swapchainImageExtent);
        image_pipeline->build_for_new_swapchain(renderPass, 1, swapchainImageExtent);
        SDF_pipeline->build_for_new_swapchain(renderPass, 2, swapchainImageExtent);
        override_pipeline->build_for_new_swapchain(renderPass, 3, swapchainImageExtent);
        tone_mapper_pipeline->build_for_new_swapchain(renderPass, 4, swapchainImageExtent);

        // Hand every delegate a copy of the swapchain image-views.
        auto image_views = std::vector<vk::ImageView>{};
        image_views.reserve(swapchain_image_infos.size());
        for (auto const& image_info : swapchain_image_infos) {
            image_views.push_back(image_info.image_view);
        }

        for (auto [delegate, semaphore] : _delegates) {
            hi_assert_not_null(delegate);
            delegate->build_for_new_swapchain(image_views, swapchainImageExtent, swapchainImageFormat);
        }

        return gfx_surface_loss::none;

    } catch (vk::SurfaceLostKHRError const&) {
        // During swapchain build we lost the surface.
        // This state will cause the swapchain to be teardown.
        return gfx_surface_loss::window_lost;
    }
}
285
/** Advance the surface state-machine by building missing Vulkan objects.
 *
 * State progresses has_window -> has_device -> has_swapchain; each step is
 * only taken when its prerequisites exist and the build succeeds. On failure
 * @a loss is set and the state is left unchanged for teardown() to handle.
 *
 * @param new_size The requested size of the swapchain images in pixels.
 */
inline void gfx_surface::build(extent2 new_size) noexcept
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());
    hi_assert(loss == gfx_surface_loss::none);

    if (state == gfx_surface_state::has_window) {
        if (_device) {
            if (loss = build_for_new_device(); loss != gfx_surface_loss::none) {
                return;
            }
            state = gfx_surface_state::has_device;
        }
    }

    if (state == gfx_surface_state::has_device) {
        if (auto const tmp = build_for_new_swapchain(new_size); tmp == gfx_surface_loss::swapchain_lost) {
            // No new swapchain was created, state has_device is maintained.
            // Note: loss is deliberately NOT set; a retry happens on the next update().
            return;

        } else if (loss = tmp; tmp != gfx_surface_loss::none) {
            return;
        }

        state = gfx_surface_state::has_swapchain;
    }
}
312
313inline void gfx_surface::teardown_for_swapchain_lost() noexcept
314{
315 hi_log_info("Tearing down because the window lost the swapchain.");
316 wait_idle();
317
318 for (auto [delegate, semaphore] : _delegates) {
319 hi_assert_not_null(delegate);
320 delegate->teardown_for_swapchain_lost();
321 }
322
323 tone_mapper_pipeline->teardown_for_swapchain_lost();
324 override_pipeline->teardown_for_swapchain_lost();
325 SDF_pipeline->teardown_for_swapchain_lost();
326 image_pipeline->teardown_for_swapchain_lost();
327 box_pipeline->teardown_for_swapchain_lost();
328 teardown_semaphores();
329 teardown_command_buffers();
330 teardown_frame_buffers();
331 teardown_render_passes();
332 teardown_swapchain();
333}
334
335inline void gfx_surface::teardown_for_device_lost() noexcept
336{
337 hi_log_info("Tearing down because the window lost the vulkan device.");
338 for (auto [delegate, semaphore] : _delegates) {
339 hi_assert_not_null(delegate);
340 delegate->teardown_for_device_lost();
341 }
342 tone_mapper_pipeline->teardown_for_device_lost();
343 override_pipeline->teardown_for_device_lost();
344 SDF_pipeline->teardown_for_device_lost();
345 image_pipeline->teardown_for_device_lost();
346 box_pipeline->teardown_for_device_lost();
347 _device = nullptr;
348}
349
/** Destroy the Vulkan surface itself; called when the window is gone. */
inline void gfx_surface::teardown_for_window_lost() noexcept
{
    gfx_system::global().destroySurfaceKHR(intrinsic);
}
354
/** Tear down Vulkan objects according to the current @a loss.
 *
 * The loss levels cascade: a device-loss implies a swapchain-loss and a
 * window-loss implies both, so each step lowers the state one level and the
 * next step's condition re-checks the (more severe) loss. Afterwards @a loss
 * is reset so build() can start from a clean slate.
 */
inline void gfx_surface::teardown() noexcept
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    if (state == gfx_surface_state::has_swapchain and loss >= gfx_surface_loss::swapchain_lost) {
        teardown_for_swapchain_lost();
        state = gfx_surface_state::has_device;
    }

    if (state == gfx_surface_state::has_device and loss >= gfx_surface_loss::device_lost) {
        teardown_for_device_lost();
        state = gfx_surface_state::has_window;
    }

    if (state == gfx_surface_state::has_window and loss >= gfx_surface_loss::window_lost) {
        hi_log_info("Tearing down because the window doesn't exist anymore.");
        teardown_for_window_lost();
        state = gfx_surface_state::no_window;
    }
    loss = gfx_surface_loss::none;
}
376
377inline void gfx_surface::update(extent2 new_size) noexcept
378{
379 auto const lock = std::scoped_lock(gfx_system_mutex);
380
381 if (size() != new_size and state == gfx_surface_state::has_swapchain) {
382 // On resize lose the swapchain, which will be cleaned up at teardown().
383 loss = gfx_surface_loss::swapchain_lost;
384 }
385
386 // Tear down then buildup from the Vulkan objects that where invalid.
387 teardown();
388 build(new_size);
389}
390
/** Start rendering a new frame.
 *
 * Acquires a swapchain image and prepares a draw_context for the widgets to
 * draw into. When the surface is not ready, or no swapchain image is
 * available, the returned context has no frame-buffer index set and is
 * effectively disabled.
 *
 * @param redraw_rectangle The part of the window that needs to be redrawn.
 * @return A draw_context holding the vertex buffers of the pipelines.
 */
inline draw_context gfx_surface::render_start(aarectangle redraw_rectangle)
{
    // Extent the redraw_rectangle to the render-area-granularity to improve performance on tile based GPUs.
    redraw_rectangle = ceil(redraw_rectangle, _render_area_granularity);

    auto const lock = std::scoped_lock(gfx_system_mutex);

    auto r = draw_context{
        *_device,
        box_pipeline->vertexBufferData,
        image_pipeline->vertexBufferData,
        SDF_pipeline->vertexBufferData,
        override_pipeline->vertexBufferData};

    // Bail out when the window is not yet ready to be rendered, or if there is nothing to render.
    if (state != gfx_surface_state::has_swapchain or not redraw_rectangle) {
        return r;
    }

    auto const optional_frame_buffer_index = acquire_next_image_from_swapchain();
    if (!optional_frame_buffer_index) {
        // No image is ready to be rendered, yet, possibly because our vertical sync function
        // is not working correctly.
        return r;
    }

    // Setting the frame buffer index, also enabled the draw_context.
    r.frame_buffer_index = narrow_cast<size_t>(*optional_frame_buffer_index);

    // Record which part of the image will be redrawn on the current swapchain image.
    auto& current_image = swapchain_image_infos.at(r.frame_buffer_index);
    current_image.redraw_rectangle = redraw_rectangle;

    // Calculate the scissor rectangle, from the combined redraws of the complete swapchain.
    // We need to do this so that old redraws are also executed in the current swapchain image.
    r.scissor_rectangle =
        std::accumulate(swapchain_image_infos.cbegin(), swapchain_image_infos.cend(), aarectangle{}, [](auto const& sum, auto const& item) {
            return sum | item.redraw_rectangle;
        });

    // Wait until previous rendering has finished, before the next rendering.
    _device->waitForFences({renderFinishedFence}, VK_TRUE, std::numeric_limits<uint64_t>::max());

    // Unsignal the fence so we will not modify/destroy the command buffers during rendering.
    _device->resetFences({renderFinishedFence});

    return r;
}
439
/** Finish rendering a frame and present it.
 *
 * Chains delegate rendering via semaphores, records and submits the
 * surface's own command-buffer, presents the image, and finally performs an
 * early teardown of any Vulkan objects invalidated during the frame.
 *
 * @param context The draw_context returned by render_start() for this frame.
 */
inline void gfx_surface::render_finish(draw_context const& context)
{
    auto const lock = std::scoped_lock(gfx_system_mutex);

    auto& current_image = swapchain_image_infos.at(context.frame_buffer_index);

    // Because we use a scissor/render_area, the image from the swapchain around the scissor-area is reused.
    // Because of reuse the swapchain image must already be in the "ePresentSrcKHR" layout.
    // The swapchain creates images in undefined layout, so we need to change the layout once.
    if (not current_image.layout_is_present) {
        _device->transition_layout(
            current_image.image, swapchainImageFormat.format, vk::ImageLayout::eUndefined, vk::ImageLayout::ePresentSrcKHR);

        current_image.layout_is_present = true;
    }

    // Clamp the scissor rectangle to the size of the window.
    auto const clamped_scissor_rectangle = intersect(
        context.scissor_rectangle,
        aarectangle{0, 0, narrow_cast<float>(swapchainImageExtent.width), narrow_cast<float>(swapchainImageExtent.height)});

    // Convert to Vulkan coordinates: the y-axis is flipped relative to the aarectangle.
    auto const render_area = vk::Rect2D{
        vk::Offset2D(
            round_cast<uint32_t>(clamped_scissor_rectangle.left()),
            round_cast<uint32_t>(
                swapchainImageExtent.height - clamped_scissor_rectangle.bottom() - clamped_scissor_rectangle.height())),
        vk::Extent2D(
            round_cast<uint32_t>(clamped_scissor_rectangle.width()), round_cast<uint32_t>(clamped_scissor_rectangle.height()))};

    // Start the first delegate when the swapchain-image becomes available.
    // Each delegate waits on the previous delegate's semaphore, forming a chain.
    auto start_semaphore = imageAvailableSemaphore;
    for (auto [delegate, end_semaphore] : _delegates) {
        hi_assert_not_null(delegate);

        delegate->draw(narrow_cast<uint32_t>(context.frame_buffer_index), start_semaphore, end_semaphore, render_area);
        start_semaphore = end_semaphore;
    }

    // Wait for the semaphore of the last delegate before it will write into the swapchain-image.
    fill_command_buffer(current_image, context, render_area);
    submit_command_buffer(start_semaphore);

    // Signal the fence when all rendering has finished on the graphics queue.
    // When the fence is signaled we can modify/destroy the command buffers.
    [[maybe_unused]] auto const submit_result = _graphics_queue->queue.submit(0, nullptr, renderFinishedFence);

    present_image_to_queue(narrow_cast<uint32_t>(context.frame_buffer_index), renderFinishedSemaphore);

    // Do an early tear down of invalid vulkan objects.
    teardown();
}
491
/** Record the surface's command-buffer for one frame.
 *
 * Records a single render-pass with five subpasses, one per pipeline
 * (box, image, SDF, override, tone-mapper), scissored to @a render_area.
 *
 * @param current_image Swapchain image-info of the image being rendered to.
 * @param context The draw-context holding this frame's vertex data.
 * @param render_area The clamped scissor/render area in Vulkan coordinates.
 */
inline void gfx_surface::fill_command_buffer(
    swapchain_image_info const& current_image,
    draw_context const& context,
    vk::Rect2D render_area)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    auto t = trace<"fill_command_buffer">{};

    commandBuffer.reset(vk::CommandBufferResetFlagBits::eReleaseResources);
    commandBuffer.begin({vk::CommandBufferUsageFlagBits::eSimultaneousUse});

    // NOTE(review): the color attachment is cleared to opaque red; presumably
    // any visible red signals a pipeline that failed to cover its area — confirm.
    auto const background_color_f32x4 = f32x4{1.0f, 0.0f, 0.0f, 1.0f};
    auto const background_color_array = static_cast<std::array<float, 4>>(background_color_f32x4);

    auto const colorClearValue = vk::ClearColorValue{background_color_array};
    auto const sdfClearValue = vk::ClearColorValue{std::array{0.0f, 0.0f, 0.0f, 0.0f}};
    auto const depthClearValue = vk::ClearDepthStencilValue{0.0, 0};
    // Order must match the attachment order of the frame-buffer/render-pass.
    auto const clearValues = std::array{
        vk::ClearValue{depthClearValue},
        vk::ClearValue{colorClearValue},
        vk::ClearValue{sdfClearValue},
        vk::ClearValue{colorClearValue}};

    // The scissor and render area makes sure that the frame buffer is not modified where we are not drawing the widgets.
    auto const scissors = std::array{render_area};
    commandBuffer.setScissor(0, scissors);

    commandBuffer.beginRenderPass(
        {renderPass, current_image.frame_buffer, render_area, narrow_cast<uint32_t>(clearValues.size()), clearValues.data()},
        vk::SubpassContents::eInline);

    // One subpass per pipeline, in the same order as build_for_new_swapchain().
    box_pipeline->draw_in_command_buffer(commandBuffer, context);
    commandBuffer.nextSubpass(vk::SubpassContents::eInline);
    image_pipeline->draw_in_command_buffer(commandBuffer, context);
    commandBuffer.nextSubpass(vk::SubpassContents::eInline);
    SDF_pipeline->draw_in_command_buffer(commandBuffer, context);
    commandBuffer.nextSubpass(vk::SubpassContents::eInline);
    override_pipeline->draw_in_command_buffer(commandBuffer, context);
    commandBuffer.nextSubpass(vk::SubpassContents::eInline);
    tone_mapper_pipeline->draw_in_command_buffer(commandBuffer, context);

    commandBuffer.endRenderPass();
    commandBuffer.end();
}
537
/** Submit the recorded command-buffer to the graphics queue.
 *
 * The submission waits on @a delegate_semaphore (the last semaphore in the
 * delegate chain, or the image-available semaphore when there are no
 * delegates) at the color-attachment-output stage, and signals
 * renderFinishedSemaphore on completion.
 *
 * @param delegate_semaphore Semaphore to wait on before writing the swapchain image.
 */
inline void gfx_surface::submit_command_buffer(vk::Semaphore delegate_semaphore)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    auto const waitSemaphores = std::array{delegate_semaphore};

    // Only stall the pipeline where the swapchain image is actually written.
    auto const waitStages = std::array{vk::PipelineStageFlags{vk::PipelineStageFlagBits::eColorAttachmentOutput}};

    hi_assert(waitSemaphores.size() == waitStages.size());

    auto const signalSemaphores = std::array{renderFinishedSemaphore};
    auto const commandBuffersToSubmit = std::array{commandBuffer};

    auto const submitInfo = std::array{vk::SubmitInfo{
        narrow_cast<uint32_t>(waitSemaphores.size()),
        waitSemaphores.data(),
        waitStages.data(),
        narrow_cast<uint32_t>(commandBuffersToSubmit.size()),
        commandBuffersToSubmit.data(),
        narrow_cast<uint32_t>(signalSemaphores.size()),
        signalSemaphores.data()}};

    _graphics_queue->queue.submit(submitInfo, vk::Fence());
}
562
/** Clamp a requested swapchain image count and size to the surface capabilities.
 *
 * @param new_count The requested number of swapchain images.
 * @param new_size The requested size of the swapchain images in pixels.
 * @return A tuple of (clamped image count, clamped image size).
 */
inline std::tuple<std::size_t, extent2> gfx_surface::get_image_count_and_size(std::size_t new_count, extent2 new_size)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    auto const surfaceCapabilities = _device->getSurfaceCapabilitiesKHR(intrinsic);

    // A maxImageCount of zero means "no limit"; fall back to 3 images.
    auto const min_count = narrow_cast<std::size_t>(surfaceCapabilities.minImageCount);
    auto const max_count = narrow_cast<std::size_t>(surfaceCapabilities.maxImageCount ? surfaceCapabilities.maxImageCount : 3);
    auto const clamped_count = std::clamp(new_count, min_count, max_count);
    hi_log_info(
        "gfx_surface min_count={}, max_count={}, requested_count={}, count={}", min_count, max_count, new_count, clamped_count);

    // minImageExtent and maxImageExtent are always valid. currentImageExtent may be 0xffffffff.
    auto const min_size = extent2{
        narrow_cast<float>(surfaceCapabilities.minImageExtent.width),
        narrow_cast<float>(surfaceCapabilities.minImageExtent.height)};
    auto const max_size = extent2{
        narrow_cast<float>(surfaceCapabilities.maxImageExtent.width),
        narrow_cast<float>(surfaceCapabilities.maxImageExtent.height)};
    auto const clamped_size = clamp(new_size, min_size, max_size);

    hi_log_info("gfx_surface min_size={}, max_size={}, requested_size={}, size={}", min_size, max_size, new_size, clamped_size);
    return {clamped_count, clamped_size};
}
587
/** Create the swapchain and its companion depth- and color-images.
 *
 * @param new_count The (already clamped) number of swapchain images.
 * @param new_size The (already clamped) size of the swapchain images.
 * @return gfx_surface_loss::none on success, window_lost when the surface
 *         disappeared during creation.
 * @throws gui_error On an unexpected result from createSwapchainKHR().
 */
inline gfx_surface_loss gfx_surface::build_swapchain(std::size_t new_count, extent2 new_size)
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    hi_log_info("Building swap chain");

    // When graphics- and present-queue differ the swapchain images must be
    // shared between the two queue families.
    auto const sharingMode = _graphics_queue == _present_queue ? vk::SharingMode::eExclusive : vk::SharingMode::eConcurrent;

    std::array<uint32_t, 2> const sharingQueueFamilyAllIndices = {
        _graphics_queue->family_queue_index, _present_queue->family_queue_index};

    swapchainImageFormat = _device->get_surface_format(intrinsic);
    nrSwapchainImages = narrow_cast<uint32_t>(new_count);
    swapchainImageExtent = VkExtent2D{round_cast<uint32_t>(new_size.width()), round_cast<uint32_t>(new_size.height())};
    vk::SwapchainCreateInfoKHR swapchainCreateInfo{
        vk::SwapchainCreateFlagsKHR(),
        intrinsic,
        nrSwapchainImages,
        swapchainImageFormat.format,
        swapchainImageFormat.colorSpace,
        swapchainImageExtent,
        1, // imageArrayLayers
        vk::ImageUsageFlagBits::eColorAttachment,
        sharingMode,
        sharingMode == vk::SharingMode::eConcurrent ? narrow_cast<uint32_t>(sharingQueueFamilyAllIndices.size()) : 0,
        sharingMode == vk::SharingMode::eConcurrent ? sharingQueueFamilyAllIndices.data() : nullptr,
        vk::SurfaceTransformFlagBitsKHR::eIdentity,
        vk::CompositeAlphaFlagBitsKHR::eOpaque,
        _device->get_present_mode(intrinsic),
        VK_TRUE, // clipped
        nullptr};

    vk::Result const result = _device->createSwapchainKHR(&swapchainCreateInfo, nullptr, &swapchain);
    switch (result) {
    case vk::Result::eSuccess:
        break;

    case vk::Result::eErrorSurfaceLostKHR:
        return gfx_surface_loss::window_lost;

    default:
        throw gui_error(std::format("Unknown result from createSwapchainKHR(). '{}'", to_string(result)));
    }

    hi_log_info("Finished building swap chain");
    hi_log_info(" - extent=({}, {})", swapchainCreateInfo.imageExtent.width, swapchainCreateInfo.imageExtent.height);
    hi_log_info(
        " - colorSpace={}, format={}",
        vk::to_string(swapchainCreateInfo.imageColorSpace),
        vk::to_string(swapchainCreateInfo.imageFormat));
    hi_log_info(
        " - presentMode={}, imageCount={}", vk::to_string(swapchainCreateInfo.presentMode), swapchainCreateInfo.minImageCount);

    // Create depth matching the swapchain.
    vk::ImageCreateInfo const depthImageCreateInfo = {
        vk::ImageCreateFlags(),
        vk::ImageType::e2D,
        depthImageFormat,
        vk::Extent3D(swapchainCreateInfo.imageExtent.width, swapchainCreateInfo.imageExtent.height, 1),
        1, // mipLevels
        1, // arrayLayers
        vk::SampleCountFlagBits::e1,
        vk::ImageTiling::eOptimal,
        vk::ImageUsageFlagBits::eDepthStencilAttachment | _device->transientImageUsageFlags,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
        vk::ImageLayout::eUndefined};

    // The user-data string is copied by VMA and shows up in allocator dumps.
    VmaAllocationCreateInfo depthAllocationCreateInfo = {};
    depthAllocationCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    depthAllocationCreateInfo.pUserData = const_cast<char *>("vk::Image depth attachment");
    depthAllocationCreateInfo.usage = _device->lazyMemoryUsage;
    std::tie(depthImage, depthImageAllocation) = _device->createImage(depthImageCreateInfo, depthAllocationCreateInfo);
    _device->setDebugUtilsObjectNameEXT(depthImage, "vk::Image depth attachment");

    // Create color image matching the swapchain.
    vk::ImageCreateInfo const colorImageCreateInfo = {
        vk::ImageCreateFlags(),
        vk::ImageType::e2D,
        colorImageFormat,
        vk::Extent3D(swapchainCreateInfo.imageExtent.width, swapchainCreateInfo.imageExtent.height, 1),
        1, // mipLevels
        1, // arrayLayers
        vk::SampleCountFlagBits::e1,
        vk::ImageTiling::eOptimal,
        vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eInputAttachment | _device->transientImageUsageFlags,
        vk::SharingMode::eExclusive,
        0,
        nullptr,
        vk::ImageLayout::eUndefined};

    VmaAllocationCreateInfo colorAllocationCreateInfo = {};
    colorAllocationCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    colorAllocationCreateInfo.pUserData = const_cast<char *>("vk::Image color attachment");
    colorAllocationCreateInfo.usage = _device->lazyMemoryUsage;

    std::tie(colorImages[0], colorImageAllocations[0]) = _device->createImage(colorImageCreateInfo, colorAllocationCreateInfo);
    _device->setDebugUtilsObjectNameEXT(colorImages[0], "vk::Image color attachment");

    return gfx_surface_loss::none;
}
690
691inline void gfx_surface::teardown_swapchain()
692{
693 hi_axiom(gfx_system_mutex.recurse_lock_count());
694
695 _device->destroy(swapchain);
696 _device->destroyImage(depthImage, depthImageAllocation);
697
698 for (std::size_t i = 0; i != colorImages.size(); ++i) {
699 _device->destroyImage(colorImages[i], colorImageAllocations[i]);
700 }
701}
702
/** Create image-views and frame-buffers for every swapchain image.
 *
 * Creates the depth image-view, the color image-views (also recorded as
 * descriptor-image-infos for the tone-mapper's input attachment), and one
 * frame-buffer per swapchain image combining depth + color + swapchain views.
 */
inline void gfx_surface::build_frame_buffers()
{
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    depthImageView = _device->createImageView(
        {vk::ImageViewCreateFlags(),
         depthImage,
         vk::ImageViewType::e2D,
         depthImageFormat,
         vk::ComponentMapping(),
         {vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1}});

    for (std::size_t i = 0; i != colorImageViews.size(); ++i) {
        colorImageViews[i] = _device->createImageView(
            {vk::ImageViewCreateFlags(),
             colorImages[i],
             vk::ImageViewType::e2D,
             colorImageFormat,
             vk::ComponentMapping(),
             {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}});

        // Used by the tone-mapper subpass to read the color attachment.
        colorDescriptorImageInfos[i] = {vk::Sampler(), colorImageViews[i], vk::ImageLayout::eShaderReadOnlyOptimal};
    }

    auto swapchain_images = _device->getSwapchainImagesKHR(swapchain);
    for (auto image : swapchain_images) {
        auto image_view = _device->createImageView(
            {vk::ImageViewCreateFlags(),
             image,
             vk::ImageViewType::e2D,
             swapchainImageFormat.format,
             vk::ComponentMapping(),
             {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1}});

        // Attachment order must match the render-pass attachment descriptions:
        // 0=depth, 1=color, 2=swapchain.
        auto const attachments = std::array{depthImageView, colorImageViews[0], image_view};

        auto const frame_buffer = _device->createFramebuffer({
            vk::FramebufferCreateFlags(),
            renderPass,
            narrow_cast<uint32_t>(attachments.size()),
            attachments.data(),
            swapchainImageExtent.width,
            swapchainImageExtent.height,
            1 // layers
        });

        // Last two members: empty redraw-rectangle, layout_is_present=false.
        swapchain_image_infos.emplace_back(
            std::move(image), std::move(image_view), std::move(frame_buffer), aarectangle{}, false);
    }

    hi_assert(swapchain_image_infos.size() == swapchain_images.size());
}
755
756inline void gfx_surface::teardown_frame_buffers()
757{
758 hi_axiom(gfx_system_mutex.recurse_lock_count());
759
760 for (auto& info : swapchain_image_infos) {
761 _device->destroy(info.frame_buffer);
762 _device->destroy(info.image_view);
763 }
764 swapchain_image_infos.clear();
765
766 _device->destroy(depthImageView);
767 for (std::size_t i = 0; i != colorImageViews.size(); ++i) {
768 _device->destroy(colorImageViews[i]);
769 }
770}
771
784inline void gfx_surface::build_render_passes()
785{
786 hi_axiom(gfx_system_mutex.recurse_lock_count());
787
788 auto const attachment_descriptions = std::array{
789 vk::AttachmentDescription{
790 // Depth attachment
791 vk::AttachmentDescriptionFlags(),
792 depthImageFormat,
793 vk::SampleCountFlagBits::e1,
794 vk::AttachmentLoadOp::eClear,
795 vk::AttachmentStoreOp::eDontCare,
796 vk::AttachmentLoadOp::eDontCare, // stencilLoadOp
797 vk::AttachmentStoreOp::eDontCare, // stencilStoreOp
798 vk::ImageLayout::eUndefined, // initialLayout
799 vk::ImageLayout::eDepthStencilAttachmentOptimal // finalLayout
800 },
801 vk::AttachmentDescription{
802 // Color attachment
803 vk::AttachmentDescriptionFlags(),
804 colorImageFormat,
805 vk::SampleCountFlagBits::e1,
806 vk::AttachmentLoadOp::eClear,
807 vk::AttachmentStoreOp::eDontCare,
808 vk::AttachmentLoadOp::eDontCare, // stencilLoadOp
809 vk::AttachmentStoreOp::eDontCare, // stencilStoreOp
810 vk::ImageLayout::eUndefined, // initialLayout
811 vk::ImageLayout::eColorAttachmentOptimal // finalLayout
812 },
813 vk::AttachmentDescription{
814 // Swapchain attachment.
815 vk::AttachmentDescriptionFlags(),
816 swapchainImageFormat.format,
817 vk::SampleCountFlagBits::e1,
818 vk::AttachmentLoadOp::eLoad,
819 vk::AttachmentStoreOp::eStore,
820 vk::AttachmentLoadOp::eDontCare, // stencilLoadOp
821 vk::AttachmentStoreOp::eDontCare, // stencilStoreOp
822 vk::ImageLayout::ePresentSrcKHR, // initialLayout
823 vk::ImageLayout::ePresentSrcKHR // finalLayout
824 }};
825
826 auto const depth_attachment_reference = vk::AttachmentReference{0, vk::ImageLayout::eDepthStencilAttachmentOptimal};
827 auto const color_attachment_references = std::array{vk::AttachmentReference{1, vk::ImageLayout::eColorAttachmentOptimal}};
828 auto const color_input_attachment_references = std::array{vk::AttachmentReference{1, vk::ImageLayout::eShaderReadOnlyOptimal}};
829 auto const swapchain_attachment_references = std::array{vk::AttachmentReference{2, vk::ImageLayout::eColorAttachmentOptimal}};
830
831 auto const subpass_descriptions = std::array{
832 vk::SubpassDescription{
833 vk::SubpassDescriptionFlags(), // Subpass 0 Box
834 vk::PipelineBindPoint::eGraphics,
835 0, // inputAttchmentReferencesCount
836 nullptr, // inputAttachmentReferences
837 narrow_cast<uint32_t>(color_attachment_references.size()),
838 color_attachment_references.data(),
839 nullptr, // resolveAttachments
840 &depth_attachment_reference
841
842 },
843 vk::SubpassDescription{
844 vk::SubpassDescriptionFlags(), // Subpass 1 Image
845 vk::PipelineBindPoint::eGraphics,
846 0, // inputAttchmentReferencesCount
847 nullptr, // inputAttachmentReferences
848 narrow_cast<uint32_t>(color_attachment_references.size()),
849 color_attachment_references.data(),
850 nullptr, // resolveAttachments
851 &depth_attachment_reference
852
853 },
854 vk::SubpassDescription{
855 vk::SubpassDescriptionFlags(), // Subpass 2 SDF
856 vk::PipelineBindPoint::eGraphics,
857 0,
858 nullptr,
859 narrow_cast<uint32_t>(color_attachment_references.size()),
860 color_attachment_references.data(),
861 nullptr, // resolveAttachments
862 &depth_attachment_reference
863
864 },
865 vk::SubpassDescription{
866 vk::SubpassDescriptionFlags(), // Subpass 3 alpha
867 vk::PipelineBindPoint::eGraphics,
868 0,
869 nullptr,
870 narrow_cast<uint32_t>(color_attachment_references.size()),
871 color_attachment_references.data(),
872 nullptr, // resolveAttachments
873 &depth_attachment_reference
874
875 },
876 vk::SubpassDescription{
877 vk::SubpassDescriptionFlags(), // Subpass 4 tone-mapper
878 vk::PipelineBindPoint::eGraphics,
879 narrow_cast<uint32_t>(color_input_attachment_references.size()),
880 color_input_attachment_references.data(),
881 narrow_cast<uint32_t>(swapchain_attachment_references.size()),
882 swapchain_attachment_references.data(),
883 nullptr,
884 nullptr}};
885
886 auto const subpass_dependency = std::array{
887 vk::SubpassDependency{
888 VK_SUBPASS_EXTERNAL,
889 0,
890 vk::PipelineStageFlagBits::eBottomOfPipe,
891 vk::PipelineStageFlagBits::eColorAttachmentOutput,
892 vk::AccessFlagBits::eMemoryRead,
893 vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
894 vk::DependencyFlagBits::eByRegion},
895 // Subpass 0: Render shaded polygons to color+depth with fixed function alpha compositing
896 vk::SubpassDependency{
897 0,
898 1,
899 vk::PipelineStageFlagBits::eColorAttachmentOutput,
900 vk::PipelineStageFlagBits::eColorAttachmentOutput,
901 vk::AccessFlagBits::eColorAttachmentWrite,
902 vk::AccessFlagBits::eColorAttachmentRead,
903 vk::DependencyFlagBits::eByRegion},
904 // Subpass 1: Render texture mapped polygons to color+depth with fixed function alpha compositing
905 vk::SubpassDependency{
906 1,
907 2,
908 vk::PipelineStageFlagBits::eColorAttachmentOutput,
909 vk::PipelineStageFlagBits::eColorAttachmentOutput,
910 vk::AccessFlagBits::eColorAttachmentWrite,
911 vk::AccessFlagBits::eColorAttachmentRead,
912 vk::DependencyFlagBits::eByRegion},
913 // Subpass 2: Render SDF-texture mapped polygons to color+depth with fixed function alpha compositing
914 vk::SubpassDependency{
915 2,
916 3,
917 vk::PipelineStageFlagBits::eColorAttachmentOutput,
918 vk::PipelineStageFlagBits::eFragmentShader,
919 vk::AccessFlagBits::eColorAttachmentWrite,
920 vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eInputAttachmentRead,
921 vk::DependencyFlagBits::eByRegion},
922 // Subpass 3: Render alpha polygons to color+depth with alpha override
923 vk::SubpassDependency{
924 3,
925 4,
926 vk::PipelineStageFlagBits::eColorAttachmentOutput,
927 vk::PipelineStageFlagBits::eFragmentShader,
928 vk::AccessFlagBits::eColorAttachmentWrite,
929 vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eInputAttachmentRead,
930 vk::DependencyFlagBits::eByRegion},
931 // Subpass 4: Tone mapping color to swapchain.
932 vk::SubpassDependency{
933 4,
934 VK_SUBPASS_EXTERNAL,
935 vk::PipelineStageFlagBits::eColorAttachmentOutput,
936 vk::PipelineStageFlagBits::eBottomOfPipe,
937 vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
938 vk::AccessFlagBits::eMemoryRead,
939 vk::DependencyFlagBits::eByRegion}};
940
941 vk::RenderPassCreateInfo const render_pass_create_info = {
942 vk::RenderPassCreateFlags(),
943 narrow_cast<uint32_t>(attachment_descriptions.size()), // attachmentCount
944 attachment_descriptions.data(), // attachments
945 narrow_cast<uint32_t>(subpass_descriptions.size()), // subpassCount
946 subpass_descriptions.data(), // subpasses
947 narrow_cast<uint32_t>(subpass_dependency.size()), // dependencyCount
948 subpass_dependency.data() // dependencies
949 };
950
951 renderPass = _device->createRenderPass(render_pass_create_info);
952 auto const granularity = _device->getRenderAreaGranularity(renderPass);
953 _render_area_granularity = extent2{narrow_cast<float>(granularity.width), narrow_cast<float>(granularity.height)};
954}
955
inline void gfx_surface::teardown_render_passes()
{
    // Caller must hold the global GUI mutex.
    hi_axiom(gfx_system_mutex.recurse_lock_count());

    // Release the render-pass object; counterpart of the render-pass build step.
    _device->destroy(renderPass);
}
962
963inline void gfx_surface::build_semaphores()
964{
965 hi_axiom(gfx_system_mutex.recurse_lock_count());
966
967 imageAvailableSemaphore = _device->createSemaphore();
968 renderFinishedSemaphore = _device->createSemaphore();
969
970 // This fence is used to wait for the Window and its Pipelines to be idle.
971 // It should therefor be signed at the start so that when no rendering has been
972 // done it is still idle.
973 renderFinishedFence = _device->createFence({vk::FenceCreateFlagBits::eSignaled});
974}
975
976inline void gfx_surface::teardown_semaphores()
977{
978 hi_axiom(gfx_system_mutex.recurse_lock_count());
979
980 _device->destroy(renderFinishedSemaphore);
981 _device->destroy(imageAvailableSemaphore);
982 _device->destroy(renderFinishedFence);
983}
984
985inline void gfx_surface::build_command_buffers()
986{
987 hi_axiom(gfx_system_mutex.recurse_lock_count());
988
989 auto const commandBuffers = _device->allocateCommandBuffers({_graphics_queue->command_pool, vk::CommandBufferLevel::ePrimary, 1});
990
991 commandBuffer = commandBuffers.at(0);
992}
993
994inline void gfx_surface::teardown_command_buffers()
995{
996 hi_axiom(gfx_system_mutex.recurse_lock_count());
997 auto const commandBuffers = std::vector<vk::CommandBuffer>{commandBuffer};
998
999 _device->freeCommandBuffers(_graphics_queue->command_pool, commandBuffers);
1000}
1001
1002[[nodiscard]] inline std::unique_ptr<gfx_surface> make_unique_gfx_surface(os_handle instance, void *os_window)
1003{
1004 auto const lock = std::scoped_lock(gfx_system_mutex);
1005
1006 auto surface_create_info = vk::Win32SurfaceCreateInfoKHR{
1007 vk::Win32SurfaceCreateFlagsKHR(), reinterpret_cast<HINSTANCE>(instance), reinterpret_cast<HWND>(os_window)};
1008
1009 auto vulkan_surface = vulkan_instance().createWin32SurfaceKHR(surface_create_info);
1010
1011 auto surface = std::make_unique<gfx_surface>(vulkan_surface);
1012
1013 // Now that we have a physical window and render surface it is time to find the gfx-device
1014 // for rendering on this surface.
1015 auto device = find_best_device(*surface);
1016 if (not device) {
1017 throw gfx_error("Could not find a vulkan-device matching this surface");
1018 }
1019 surface->set_device(device);
1020
1021 return surface;
1022}
1023
1024} // namespace hi::inline v1
DOXYGEN BUG.
Definition algorithm_misc.hpp:20
gfx_device * find_best_device(gfx_surface const &surface)
Find the best device for a surface.
Definition gfx_surface_vulkan_intf.hpp:191
unfair_recursive_mutex gfx_system_mutex
Global mutex for GUI elements, like gfx_system, gfx_device, Windows and Widgets.
Definition gfx_system_globals.hpp:18
gfx_surface_loss
Definition gfx_surface_state.hpp:20
Definition gfx_device_vulkan_intf.hpp:26
A delegate for drawing on a window below the HikoGUI user interface.
Definition gfx_surface_delegate_vulkan.hpp:24
T accumulate(T... args)
T addressof(T... args)
T ceil(T... args)
T data(T... args)
T end(T... args)
T find_if(T... args)
T lock(T... args)
T max(T... args)
T move(T... args)
T reserve(T... args)
T size(T... args)
T tie(T... args)
T to_string(T... args)