HikoGUI
A low latency retained GUI
unfair_rwmutex.hpp
// Copyright Take Vos 2022.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)

#pragma once

#include "utility.hpp"
#include "thread.hpp"
#include "assert.hpp"
#include "dead_lock_detector.hpp"
#include <atomic>
#include <climits>
#include <memory>
#include <thread>

namespace hi::inline v1 {

/** An unfair read-write mutex.
 *
 * This is a fast implementation of a mutex which does not fairly arbitrate
 * between multiple waiting threads.
 */
class unfair_rwmutex {
public:
    bool is_locked() const noexcept
    {
        return _is_locked(_v.load(std::memory_order::relaxed));
    }

    /** Try to take an exclusive lock without blocking.
     *
     * When try_lock() is called from a thread that already owns the lock it will return false.
     */
    [[nodiscard]] hi_force_inline bool exclusive_try_lock() noexcept
    {
        value_type expected = 0;
        if (not _exclusive_try_lock(expected)) {
            [[unlikely]] return exclusive_lock_contended<false>(expected);
        } else {
            return true;
        }
    }

    hi_force_inline void exclusive_lock() noexcept
    {
        value_type expected = 0;
        if (not _exclusive_try_lock(expected)) {
            [[unlikely]] exclusive_lock_contended<true>(expected);
        }
    }

    /** Unlock the mutex.
     */
    hi_force_inline void exclusive_unlock() noexcept
    {
        hi_axiom(holds_invariant());

        if (_v.fetch_sub(exclusive_one, std::memory_order::release) - exclusive_one) {
            hi_axiom(holds_invariant());

            // There can't be any shared-locks outstanding when `fetch_sub()` was called,
            // which means that if any bit is still set there were waiters. Wake one up.
            _v.notify_one();
        }

        hi_axiom(holds_invariant());
    }

    /** Get a shared lock.
     */
    hi_force_inline void shared_lock() noexcept
    {
        value_type expected = 0;
        if (not _shared_try_lock(expected)) {
            [[unlikely]] shared_lock_contended(expected);
        }
    }

    [[nodiscard]] hi_force_inline bool shared_try_lock() noexcept
    {
        value_type expected = 0;
        return _shared_try_lock(expected);
    }

    hi_force_inline void shared_unlock() noexcept
    {
        hi_axiom(holds_invariant());

        hilet tmp = _v.fetch_sub(shared_one, std::memory_order::release) - shared_one;
        if (tmp != 0 and shared_count(tmp) == 0) {
            // There can't be any exclusive-locks outstanding when `fetch_sub()` was called.
            // This was also the last shared-lock, which means any bit that is still set
            // belongs to the waiter-count. Wake one waiter up.
            _v.notify_one();
        }

        hi_axiom(holds_invariant());
    }

private:
    using atomic_value_type = std::atomic_unsigned_lock_free;
    using value_type = atomic_value_type::value_type;

    constexpr static value_type total_bit = sizeof(value_type) * CHAR_BIT;
    constexpr static value_type waiter_bit = total_bit / 2;
    constexpr static value_type exclusive_bit = 1;
    constexpr static value_type shared_bit = total_bit - exclusive_bit - waiter_bit;

    constexpr static value_type exclusive_off = 0;
    constexpr static value_type shared_off = exclusive_off + exclusive_bit;
    constexpr static value_type waiter_off = shared_off + shared_bit;

    constexpr static value_type exclusive_mask = ((value_type{1} << exclusive_bit) - 1) << exclusive_off;
    constexpr static value_type shared_mask = ((value_type{1} << shared_bit) - 1) << shared_off;
    constexpr static value_type waiter_mask = ((value_type{1} << waiter_bit) - 1) << waiter_off;

    constexpr static value_type exclusive_one = value_type{1} << exclusive_off;
    constexpr static value_type shared_one = value_type{1} << shared_off;
    constexpr static value_type waiter_one = value_type{1} << waiter_off;

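    // Layout of _v, derived from the constants above (assuming a 64-bit value_type):
    //
    //   bit   0        : exclusive-count (1 when an exclusive lock is held)
    //   bits  1 .. 31  : shared-count    (number of shared locks held)
    //   bits 32 .. 63  : waiter-count    (number of threads blocked in wait())
    //
    // Invariant: the exclusive-count and the shared-count are never both non-zero
    // (see holds_invariant()).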
    atomic_value_type _v = 0;

    [[nodiscard]] static value_type exclusive_count(value_type value) noexcept
    {
        return value & exclusive_mask;
    }

    [[nodiscard]] static value_type shared_count(value_type value) noexcept
    {
        static_assert(waiter_off + waiter_bit == total_bit);
        static_assert(exclusive_off == 0);
        static_assert(shared_off == exclusive_off + exclusive_bit);

        value <<= total_bit - waiter_off;
        value >>= (total_bit - waiter_off) + shared_off;
        return value;
    }

    [[nodiscard]] static bool _is_locked(value_type value) noexcept
    {
        static_assert(waiter_off + waiter_bit == total_bit);

        return to_bool(value << (total_bit - waiter_off));
    }

    [[nodiscard]] static value_type increment_exclusive(value_type value) noexcept
    {
        hi_axiom((value & exclusive_mask) != exclusive_mask);
        return value + exclusive_one;
    }

    [[nodiscard]] static value_type increment_shared(value_type value) noexcept
    {
        hi_axiom((value & shared_mask) != shared_mask);
        return value + shared_one;
    }

    [[nodiscard]] static value_type increment_wait(value_type value) noexcept
    {
        hi_axiom((value & waiter_mask) != waiter_mask);
        return value + waiter_one;
    }

    [[nodiscard]] static value_type clear_exclusive_and_waiter(value_type value) noexcept
    {
        return value & shared_mask;
    }

    [[nodiscard]] bool holds_invariant() const noexcept
    {
        hilet tmp = _v.load(std::memory_order::relaxed);
        return not (to_bool(shared_count(tmp)) and to_bool(exclusive_count(tmp)));
    }

    hi_force_inline void wait(value_type& expected) noexcept
    {
        hi_axiom(holds_invariant());

        // Keep track of how many waiters there are.
        hilet desired = increment_wait(expected);
        if (_v.compare_exchange_strong(expected, desired, std::memory_order::relaxed)) {
            hi_axiom(holds_invariant());
            _v.wait(desired);

            // Decrement the wait-count; `expected` is refreshed for the caller's retry loop.
            expected = _v.fetch_sub(waiter_one, std::memory_order::relaxed) - waiter_one;
        }

        hi_axiom(holds_invariant());
    }

    [[nodiscard]] hi_force_inline bool _exclusive_try_lock(value_type& expected) noexcept
    {
        hi_axiom(holds_invariant());

        // A non-contended exclusive-lock can only be taken if both the shared-count and exclusive-count are zero.
        // To improve performance also expect the wait-count to be zero; that way we don't need to read the
        // current value first.
        expected = 0;
        return _v.compare_exchange_strong(
            expected, increment_exclusive(expected), std::memory_order::acquire, std::memory_order::relaxed);
    }

    template<bool Wait>
    hi_no_inline bool exclusive_lock_contended(value_type expected) noexcept
    {
        while (true) {
            hi_axiom(holds_invariant());

            if (_is_locked(expected)) {
                if constexpr (Wait) {
                    wait(expected);
                } else {
                    return false;
                }

            } else {
                if (_v.compare_exchange_strong(
                        expected, increment_exclusive(expected), std::memory_order::acquire, std::memory_order::relaxed)) {
                    hi_axiom(holds_invariant());
                    return true;
                }
            }
        }
    }

    [[nodiscard]] bool _shared_try_lock(value_type& expected) noexcept
    {
        // Increment the shared-count only when exclusive-count and waiter-count are zero.
        expected = clear_exclusive_and_waiter(_v.load(std::memory_order::relaxed));

        return _v.compare_exchange_strong(
            expected, increment_shared(expected), std::memory_order::acquire, std::memory_order::relaxed);
    }

    hi_no_inline void shared_lock_contended(value_type expected) noexcept
    {
        while (true) {
            hi_axiom(holds_invariant());

            if (exclusive_count(expected)) {
                wait(expected);

            } else {
                if (_v.compare_exchange_strong(
                        expected, increment_shared(expected), std::memory_order::acquire, std::memory_order::relaxed)) {
                    hi_axiom(holds_invariant());
                    return;
                }
            }
        }
    }
};

} // namespace hi::inline v1
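
// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the original header): shows how
// the member functions above might be used to protect data that is read often
// and written rarely. The example namespace, the guard macro and the
// reader()/writer() functions are hypothetical names introduced here.
#ifdef HI_UNFAIR_RWMUTEX_EXAMPLE
#include <vector>

namespace unfair_rwmutex_example {

inline hi::unfair_rwmutex mutex;
inline int counter = 0;

// A writer takes the exclusive lock; it blocks until all shared locks are released.
inline void writer()
{
    mutex.exclusive_lock();
    ++counter;
    mutex.exclusive_unlock(); // Wakes one waiter, if any.
}

// Readers take shared locks; many readers may hold the lock at the same time.
inline int reader()
{
    mutex.shared_lock();
    auto const tmp = counter;
    mutex.shared_unlock();
    return tmp;
}

// A non-blocking attempt at the exclusive lock.
inline bool try_writer()
{
    if (mutex.exclusive_try_lock()) {
        ++counter;
        mutex.exclusive_unlock();
        return true;
    }
    return false;
}

inline void run()
{
    std::vector<std::thread> threads;
    for (int i = 0; i != 4; ++i) {
        threads.emplace_back([] {
            for (int j = 0; j != 1000; ++j) {
                reader();
            }
        });
    }
    threads.emplace_back([] {
        for (int j = 0; j != 1000; ++j) {
            writer();
        }
    });
    for (auto& t : threads) {
        t.join();
    }
}

} // namespace unfair_rwmutex_example
#endif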