HikoGUI
A low latency retained GUI
unfair_mutex_impl.hpp
// Copyright Take Vos 2020-2022.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)

#pragma once

#include "unfair_mutex_intf.hpp"
#include "global_state.hpp"
#include "../utility/utility.hpp"
#include "../macros.hpp"
#include <atomic>
#include <memory>
#include <format>
#include <algorithm>
#include <mutex>
#include <utility>
#include <vector>

hi_export_module(hikogui.concurrency.unfair_mutex : impl);
20
21namespace hi { inline namespace v1 {
22namespace detail {
23
24inline unfair_mutex_impl<false> unfair_mutex_deadlock_mutex;
25
26thread_local inline std::vector<void *> unfair_mutex_deadlock_stack;
27
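/** The global graph of observed lock orderings.
 *
 * Each pair (first, second) records that `second` was locked while `first` was
 * already held. The vector is kept sorted so orderings can be found with
 * `std::binary_search`.
 */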
inline std::vector<std::pair<void *, void *>> unfair_mutex_deadlock_lock_graph;

[[nodiscard]] inline void *unfair_mutex_deadlock_check_graph(void *object) noexcept
{
    hi_assert_not_null(object);

    hilet lock = std::scoped_lock(detail::unfair_mutex_deadlock_mutex);

    for (hilet before : unfair_mutex_deadlock_stack) {
        auto correct_order = std::make_pair(before, object);
        hilet reverse_order = std::make_pair(object, before);

        if (std::binary_search(
                unfair_mutex_deadlock_lock_graph.cbegin(), unfair_mutex_deadlock_lock_graph.cend(), correct_order)) {
            // The object has been locked in the correct order in comparison to `before`.
            continue;
        }

        if (std::binary_search(
                unfair_mutex_deadlock_lock_graph.cbegin(), unfair_mutex_deadlock_lock_graph.cend(), reverse_order)) {
            // The object has been locked in reverse order in comparison to `before`.
            return before;
        }

        // Insert the new 'correct' order in the sorted lock_graph.
        hilet it =
            std::upper_bound(unfair_mutex_deadlock_lock_graph.cbegin(), unfair_mutex_deadlock_lock_graph.cend(), correct_order);
        unfair_mutex_deadlock_lock_graph.insert(it, std::move(correct_order));
    }
    return nullptr;
}

} // namespace detail

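/** Lock an object on this thread.
 *
 * @param object The object that is being locked.
 * @return nullptr when the lock was registered, `object` when it is already
 *         locked by this thread, or the object that this lock order conflicts with.
 */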
hi_export inline void *unfair_mutex_deadlock_lock(void *object) noexcept
{
    if (is_system_shutting_down()) {
        // thread_local variables used by `stack` do not work on MSVC after main() returns.
        return nullptr;
    }

    hi_assert_not_null(object);

    if (std::count(detail::unfair_mutex_deadlock_stack.begin(), detail::unfair_mutex_deadlock_stack.end(), object) != 0) {
        // `object` is already locked by the current thread.
        return object;
    }

    if (auto before = detail::unfair_mutex_deadlock_check_graph(object)) {
        // Trying to lock `object` after `before`, in an order that was previously reversed.
        return before;
    }

    detail::unfair_mutex_deadlock_stack.push_back(object);
    return nullptr;
}

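/** Unlock an object on this thread.
 *
 * @param object The object that is being unlocked.
 * @return true when the unlock is consistent with the lock order on this
 *         thread, false when unlocking out of order or when nothing was locked.
 */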
hi_export inline bool unfair_mutex_deadlock_unlock(void *object) noexcept
{
    if (is_system_shutting_down()) {
        // thread_local variables used by `stack` do not work on MSVC after main() returns.
        return true;
    }

    hi_assert_not_null(object);

    // Trying to unlock `object`, but nothing on this thread was locked.
    if (detail::unfair_mutex_deadlock_stack.empty()) {
        return false;
    }

    // Trying to unlock `object`, but unlocking in a different order.
    if (detail::unfair_mutex_deadlock_stack.back() != object) {
        return false;
    }

    detail::unfair_mutex_deadlock_stack.pop_back();
    return true;
}

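/** Remove the object from the deadlock detection.
 *
 * Erases every lock-order pair that mentions the object, for example when the
 * mutex is destroyed.
 *
 * @param object The object to remove from the lock graph.
 */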
hi_export inline void unfair_mutex_deadlock_remove_object(void *object) noexcept
{
    hi_assert_not_null(object);

    if (is_system_shutting_down()) {
        // The global `lock_graph` can not be used on MSVC after main() returns.
        return;
    }

    hilet lock = std::scoped_lock(detail::unfair_mutex_deadlock_mutex);
    std::erase_if(detail::unfair_mutex_deadlock_lock_graph, [object](hilet& item) {
        return item.first == object or item.second == object;
    });
}

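/** Clear the stack.
 *
 * Empties the per-thread stack of objects that are currently registered as locked.
 */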
hi_export inline void unfair_mutex_deadlock_clear_stack() noexcept
{
    if (is_system_shutting_down()) {
        // thread_local variables used by `stack` do not work on MSVC after main() returns.
        return;
    }

    detail::unfair_mutex_deadlock_stack.clear();
}

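/** Clear the graph.
 *
 * Empties the global graph of previously observed lock orderings.
 */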
hi_export inline void unfair_mutex_deadlock_clear_graph() noexcept
{
    if (is_system_shutting_down()) {
        // The global `lock_graph` can not be used on MSVC after main() returns.
        return;
    }

    hilet lock = std::scoped_lock(detail::unfair_mutex_deadlock_mutex);
    detail::unfair_mutex_deadlock_lock_graph.clear();
}

template<bool UseDeadLockDetector>
inline unfair_mutex_impl<UseDeadLockDetector>::~unfair_mutex_impl()
{
    hi_axiom(not is_locked());
    if constexpr (UseDeadLockDetector) {
        unfair_mutex_deadlock_remove_object(this);
    }
}

template<bool UseDeadLockDetector>
inline bool unfair_mutex_impl<UseDeadLockDetector>::is_locked() const noexcept
{
    return semaphore.load(std::memory_order::relaxed) != 0;
}

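// The `semaphore` value encodes the state of the mutex:
//   0 - unlocked.
//   1 - locked, no other threads are waiting.
//   2 - locked, other threads may be waiting and must be woken on unlock.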
template<bool UseDeadLockDetector>
inline void unfair_mutex_impl<UseDeadLockDetector>::lock() noexcept
{
    if constexpr (UseDeadLockDetector) {
        hilet other = unfair_mutex_deadlock_lock(this);
        hi_assert(other != this, "This mutex is already locked.");
        hi_assert(other == nullptr, "Potential dead-lock because of different lock ordering of mutexes.");
    }

    hi_axiom(holds_invariant());

    // Switch to 1 means there are no waiters.
    semaphore_value_type expected = 0;
    if (not semaphore.compare_exchange_strong(expected, 1, std::memory_order::acquire)) {
        [[unlikely]] lock_contended(expected);
    }

    hi_axiom(holds_invariant());
}

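/** Try to lock the mutex without blocking.
 *
 * When try_lock() is called from a thread that already owns the lock it will return false.
 *
 * @return true when the lock was acquired, false otherwise.
 */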
template<bool UseDeadLockDetector>
inline bool unfair_mutex_impl<UseDeadLockDetector>::try_lock() noexcept
{
    if constexpr (UseDeadLockDetector) {
        hilet other = unfair_mutex_deadlock_lock(this);
        hi_assert(other != this, "This mutex is already locked.");
        hi_assert(other == nullptr, "Potential dead-lock because of different lock ordering of mutexes.");
    }

    hi_axiom(holds_invariant());

    // Switch to 1 means there are no waiters.
    semaphore_value_type expected = 0;
    if (not semaphore.compare_exchange_strong(expected, 1, std::memory_order::acquire)) {
        hi_axiom(holds_invariant());

        if constexpr (UseDeadLockDetector) {
            hi_assert(unfair_mutex_deadlock_unlock(this), "Unlock is not done in reverse order.");
        }

        [[unlikely]] return false;
    }

    hi_axiom(holds_invariant());
    return true;
}

template<bool UseDeadLockDetector>
inline void unfair_mutex_impl<UseDeadLockDetector>::unlock() noexcept
{
    if constexpr (UseDeadLockDetector) {
        hi_assert(unfair_mutex_deadlock_unlock(this), "Unlock is not done in reverse order.");
    }

    hi_axiom(holds_invariant());

    if (semaphore.fetch_sub(1, std::memory_order::relaxed) != 1) {
        [[unlikely]] semaphore.store(0, std::memory_order::release);

        semaphore.notify_one();
    } else {
        atomic_thread_fence(std::memory_order::release);
    }

    hi_axiom(holds_invariant());
}

template<bool UseDeadLockDetector>
[[nodiscard]] inline bool unfair_mutex_impl<UseDeadLockDetector>::holds_invariant() const noexcept
{
    return semaphore.load(std::memory_order::relaxed) <= 2;
}

template<bool UseDeadLockDetector>
hi_no_inline inline void unfair_mutex_impl<UseDeadLockDetector>::lock_contended(semaphore_value_type expected) noexcept
{
    hi_axiom(holds_invariant());

    do {
        hilet should_wait = expected == 2;

        // Set to 2 when we are waiting.
        expected = 1;
        if (should_wait || semaphore.compare_exchange_strong(expected, 2)) {
            hi_axiom(holds_invariant());
            semaphore.wait(2);
        }

        hi_axiom(holds_invariant());
        // Set to 2 when acquiring the lock, so that during unlock we wake other waiting threads.
        expected = 0;
    } while (!semaphore.compare_exchange_strong(expected, 2));
}

}} // namespace hi::v1
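
For illustration only, and not part of the header: a minimal sketch of how the exported deadlock-detection hooks are meant to be driven. The `watched_lock` class below is hypothetical; it only shows the calling convention that `unfair_mutex_impl` itself follows with `UseDeadLockDetector` enabled: report the address on lock, release in reverse order on unlock, and remove the address when the object is destroyed.

#include <cassert>

// Hypothetical wrapper, for illustration only: any lockable object can feed its
// address into the HikoGUI deadlock detector the same way unfair_mutex_impl does.
class watched_lock {
public:
    ~watched_lock()
    {
        // Drop all lock-graph edges that mention this address.
        hi::unfair_mutex_deadlock_remove_object(this);
    }

    void lock() noexcept
    {
        // nullptr: the lock order is consistent with every ordering seen so far.
        // `this`:  this thread already holds the lock (recursive locking).
        // other:   an object that was previously locked in the reverse order.
        auto *conflict = hi::unfair_mutex_deadlock_lock(this);
        assert(conflict == nullptr);
        (void)conflict;
        // ... acquire the real lock here ...
    }

    void unlock() noexcept
    {
        // Must be released in the reverse order of locking on this thread.
        bool const in_order = hi::unfair_mutex_deadlock_unlock(this);
        assert(in_order);
        (void)in_order;
        // ... release the real lock here ...
    }
};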