libclod
C library for interacting with NBTs, region files, LOD data and other things.
Loading...
Searching...
No Matches
lock.c
1#include "config.h"
2#include "debug.h"
3
4#include <clod/thread.h>
5#include <clod/rwseq.h>
6#include <clod/sys/futex.h>
7#include <time.h>
8
9#include "keepalive.h"
10
/** Maximum number of concurrently held read locks.
 * Must fit the 5-bit read_locks field of struct lock: 2^5 - 1 = 31.
 * (The previous value, 63, could never be reached by a 5-bit field, so the
 * saturation check in clod_rwseq_rd_lock never fired and the counter could
 * wrap into the adjacent read_seq bits.) */
#define READ_LOCKS_MAX 31

/** Upper bound on a single futex sleep, in milliseconds, so that waiters
 * periodically wake to re-check the lock word and their timeout budget. */
#define MAX_FUTEX_WAIT_MS 100

/** How long to wait without observing any progress on the lock word before
 * declaring the holder dead, in milliseconds. */
#define DEAD_LOCK_TIMEOUT_MS 10000
20
/* Decoded, in-memory view of the 32-bit lock word. The word is stored with
 * its bytes reversed relative to host order; encode()/decode() perform the
 * swap so the rest of this file works on a plain bitfield struct. */
struct lock {
	uint32_t blocked: 1;     // writer intent flag; readers back off while set — TODO confirm against writer path (not in this chunk)
	uint32_t write_lock: 1;  // exclusive write lock held
	uint32_t read_locks: 5;  // count of shared readers (0..31)
	uint32_t read_seq: 3;    // bumped by the reader heartbeat to signal liveness
	uint32_t write_seq: 22;  // bumped on writer progress; watched by waiters to reset their dead-lock budget
};

// Full 32-bit byte reversal (its own inverse): swap halves, then swap the
// bytes within each half.
static uint32_t byte_reverse(uint32_t v) {
	v = ((v & 0x0000FFFFu) << 16) | ((v & 0xFFFF0000u) >> 16);
	v = ((v & 0x00FF00FFu) << 8) | ((v & 0xFF00FF00u) >> 8);
	return v;
}

// Unpack a raw (byte-swapped) lock word into its fields.
static struct lock decode(uint32_t val) {
	const uint32_t v = byte_reverse(val);
	return (struct lock){
		.blocked    = v & 1u,
		.write_lock = (v >> 1) & 1u,
		.read_locks = (v >> 2) & 0x1Fu,
		.read_seq   = (v >> 7) & 0x7u,
		.write_seq  = (v >> 10) & 0x3FFFFFu,
	};
}

// Pack the fields back into a raw (byte-swapped) lock word. Exact inverse
// of decode(): all 32 bits are covered by the five fields.
static uint32_t encode(const struct lock lock) {
	const uint32_t v =
		(uint32_t)lock.blocked |
		(uint32_t)lock.write_lock << 1 |
		(uint32_t)lock.read_locks << 2 |
		(uint32_t)lock.read_seq << 7 |
		(uint32_t)lock.write_seq << 10;
	return byte_reverse(v);
}

// Field-wise equality. Since encode() maps each distinct field combination
// to a distinct word, comparing encodings is equivalent to comparing all
// five fields.
static bool equal(const struct lock lock1, const struct lock lock2) {
	return encode(lock1) == encode(lock2);
}
58static struct lock load(const uint32_t *ptr) { return decode(clod_atomic_load(ptr)); }
59static bool cas(uint32_t *ptr, struct lock *expected, const struct lock desired) {
60 uint32_t expected_val = encode(*expected);
61 if (!clod_atomic_cas(ptr, &expected_val, encode(desired))) {
62 *expected = decode(expected_val);
63 return false;
64 }
65 *expected = desired;
66 return true;
67}
68static struct timespec now() {
69 struct timespec now;
70 if (timespec_get(&now, CLOCK_MONOTONIC) == 0) {
71 debug(CLOD_RWSEQ_DEBUG, "System clock doesn't support monotonic time.");
72 return (struct timespec){0};
73 }
74 return now;
75}
76static int time_delta(struct timespec *last) {
77 struct timespec current = now();
78 int diff =
79 (int)((current.tv_sec - last->tv_sec) * 1000) +
80 (int)((current.tv_nsec - last->tv_nsec) / 1000000);
81
82 *last = current;
83 return diff;
84}
// Block until the lock word no longer equals `expected`, or until the
// caller's budget runs out. *timeout (milliseconds) is decremented by the
// time actually spent; futex sleeps are capped at MAX_FUTEX_WAIT_MS so the
// budget is re-checked periodically. Returns the last observed lock value
// (which may still equal `expected` if the budget expired).
static struct lock wait(const uint32_t *ptr, const struct lock expected, int *timeout) {
	auto time = now();
	struct lock lock = load(ptr);

	// One might typically spinwait here a bit, or do some other intermediate waiting before
	// sleeping the thread. Context switches are somewhat expensive, but the intended use case
	// for these locks does not include very brief periods of locking. As such, we assume
	// that it will be a while and go straight to sleep.
	while (equal(lock, expected)) {
		if (*timeout <= 0)
			return lock;

		// Sleep until woken or the capped interval elapses. The futex
		// compares the word against encode(expected) to avoid a lost
		// wakeup between our load above and the sleep.
		[[maybe_unused]]
		bool timed_out = clod_futex_wait((int*)ptr, (int)encode(expected), *timeout < MAX_FUTEX_WAIT_MS ? *timeout : MAX_FUTEX_WAIT_MS);

		*timeout -= time_delta(&time);
		lock = load(ptr);

		if (timed_out) {
			// These checks are possibly spurious,
			// as the other thread may have changed the value in the time between us waking up due to timeout
			// and us actually checking the value.
			// NOTE(review): "%ptr" renders as %p followed by the literal "tr" — confirm intended format.
			if (lock.blocked == 0 && expected.blocked)
				debug(CLOD_RWSEQ_DEBUG, "%ptr Blocked flag was unset, but we weren't woken up. Possible bug in other thread.\n", (void*)ptr);
			if (lock.write_lock == 0 && expected.write_lock)
				debug(CLOD_RWSEQ_DEBUG, "%ptr Write lock released, but we weren't woken up. Possible bug in other thread.\n", (void*)ptr);
			if (lock.read_locks == 0 && expected.read_locks)
				debug(CLOD_RWSEQ_DEBUG, "%ptr Read locks released, but we weren't woken up. Possible bug in other thread.\n", (void*)ptr);
		}
	}
	return lock;
}
// Wake every thread futex-waiting on this lock word.
static void wake(const uint32_t *ptr) { clod_futex_wake_all((int*)ptr); }
118
// Begin an optimistic (uncounted) read: wait until no writer holds or has
// announced the lock, then report the current write sequence via *seq_out.
// The caller passes that sequence back to clod_rwseq_ro_unlock, which
// detects whether a writer interfered during the read. Returns
// CLOD_RWSEQ_DEAD if no progress is observed for DEAD_LOCK_TIMEOUT_MS.
enum clod_rwseq_result clod_rwseq_ro_lock(const uint32_t *ptr, uint32_t *seq_out) {
	int timeout = DEAD_LOCK_TIMEOUT_MS;
	struct lock lock = load(ptr);
	while (lock.blocked || lock.write_lock) {
		if (timeout <= 0)
			return CLOD_RWSEQ_DEAD;

		struct lock actual = wait(ptr, lock, &timeout);

		// Any movement of write_seq counts as writer progress: reset
		// the dead-lock budget instead of giving up.
		if (actual.write_seq != lock.write_seq)
			timeout = DEAD_LOCK_TIMEOUT_MS;

		lock = actual;
	}

	*seq_out = lock.write_seq;
	return CLOD_RWSEQ_OK;
}
137enum clod_rwseq_result clod_rwseq_ro_unlock(const uint32_t *ptr, const uint32_t seq) {
138 const struct lock current = load(ptr);
139
140 if (current.write_seq != seq)
142
143 return CLOD_RWSEQ_OK;
144}
145
146bool clod_rwseq_rd_heartbeat(void *ptr) {
147 struct lock lock = load(ptr);
148 struct lock want;
149 do {
150 if (lock.read_locks == 0) {
151 debug(CLOD_RWSEQ_DEBUG, "%ptr Possible failure to unlock read lock. Read sequence heartbeat task left running on lock with no read locks.", ptr);
152 return true;
153 }
154
155 want = lock;
156 want.read_seq++;
157 } while (!cas(ptr, &lock, want));
158 return false;
159}
160enum clod_rwseq_result clod_rwseq_rd_lock(uint32_t *ptr) {
161 int timeout = DEAD_LOCK_TIMEOUT_MS;
162 struct lock lock = load(ptr);
163 struct lock want;
164 do {
165 while (lock.blocked || lock.write_lock || lock.read_locks == READ_LOCKS_MAX) {
166 if (timeout <= 0)
167 return CLOD_RWSEQ_DEAD;
168
169 struct lock actual = wait(ptr, lock, &timeout);
170
171 if (actual.write_seq != lock.write_seq)
172 timeout = DEAD_LOCK_TIMEOUT_MS;
173 }
174
175 want = lock;
176 want.read_locks++;
177 } while (!cas(ptr, &lock, want));
178
179 clod_rwseq_rd_keepalive_start((int*)ptr);
180 return CLOD_RWSEQ_OK;
181}
// Release a counted read lock taken with clod_rwseq_rd_lock. Stops the
// keepalive task first, then decrements read_locks via CAS loop. Returns
// CLOD_RWSEQ_MISUSE if the lock holds no read locks.
enum clod_rwseq_result clod_rwseq_rd_unlock(uint32_t *ptr) {
	clod_rwseq_rd_keepalive_end((int*)ptr);
	struct lock lock = load(ptr);
	struct lock want;
	do {
		if (lock.read_locks == 0) {
			debug(CLOD_RWSEQ_DEBUG, "%ptr Attempted to unlock already unlocked read lock.", (void*)ptr);
			return CLOD_RWSEQ_MISUSE;
		}

		want = lock;
		want.read_locks--;
	} while (!cas(ptr, &lock, want));

	// On success, cas() stored `want` back into `lock`, so this reads the
	// post-decrement count: only the final reader issues the wake
	// (e.g. for a writer waiting on readers to drain).
	if (lock.read_locks == 0)
		wake(ptr);

	return CLOD_RWSEQ_OK;
}
201
202
203
204/*
205
206enum clod_rwseq_result clod_rwseq_rd_lock(uint32_t *ptr) {
207 int timeout = DEAD_LOCK_TIMEOUT_MS;
208 struct lock lock = load(ptr);
209
210 while (lock.blocked || lock.write_lock || lock.read_locks == READ_LOCKS_MAX) {
211
212 }
213
214
215
216 struct lock lock = {0};
217
218 struct lock want;
219 int timeout = dead_threshold_ms;
220 do {
221 while (lock.blocked || lock.write_lock || lock.read_locks == READ_LOCKS_MAX) {
222 if (timeout <= 0)
223 return CLOD_RWSEQ_DEAD;
224
225 if (!wait(ptr, lock, &timeout))
226 return CLOD_RWSEQ_OTHER;
227
228 if (load_progress(ptr, &lock))
229 timeout = dead_threshold_ms;
230 }
231
232 want = lock;
233 want.read_locks++;
234 } while (!cas(ptr, &lock, want));
235
236 *seq_out = lock.sequence;
237 return CLOD_RWSEQ_OK;
238}
239enum clod_rwseq_result clod_rwseq_rd_unlock(uint32_t *ptr, uint32_t seq) {
240 struct lock lock = load(ptr);
241 struct lock want;
242 do {
243 want = lock;
244 if (want.read_locks > 0)
245 want.read_locks--;
246 } while (!cas(ptr, &lock, want));
247
248
249
250 if (lock.read_locks == 0)
251 if (!wake(ptr))
252 return CLOD_RWSEQ_OTHER;
253 return CLOD_RWSEQ_OK;
254}
255
256enum clod_rwseq_result clod_rwseq_wr_lock(uint32_t *ptr, int dead_threshold_ms) {
257 struct lock lock = load(ptr);
258 struct lock want;
259 int timeout = dead_threshold_ms;
260 bool dead_acquired = false;
261
262 // Acquire the write lock.
263 do {
264 while (lock.blocked || lock.write_lock) {
265 if (timeout <= 0) {
266 dead_acquired = true;
267 break;
268 }
269
270 if (!wait(ptr, lock, &timeout))
271 return CLOD_RWSEQ_OTHER;
272
273 if (load_progress(ptr, &lock))
274 timeout = dead_threshold_ms;
275 }
276
277 want = lock;
278 want.blocked = 0;
279 want.write_lock = 1;
280 want.sequence++;
281 } while (!cas(ptr, &lock, want));
282
283 timeout = dead_threshold_ms;
284 while (lock.read_locks) {
285 // We need to wait for remaining readers to finish.
286 if (timeout <= 0) {
287 // Assume remaining readers are dead.
288 do {
289 want = lock;
290 want.read_locks = 0;
291 want.sequence++;
292 } while (!cas(ptr, &lock, want));
293 }
294
295 if (!wait(ptr, lock, &timeout))
296 return CLOD_RWSEQ_OTHER;
297
298 // Keep our lock alive while we wait for readers.
299 do {
300 want = lock;
301 want.sequence++;
302 } while (!cas(ptr, &lock, want));
303 }
304
305 return CLOD_RWSEQ_OK;
306}
307enum clod_rwseq_result clod_rwseq_wr_refresh(uint32_t *ptr) {
308 struct lock lock = load(ptr);
309 if (!lock.write_lock)
310 return CLOD_RWSEQ_INTERRUPTED;
311
312 struct lock want;
313
314
315
316 struct lock current = load(ptr);
317 struct lock want;
318 do {
319 if (current.sequence != lock->sequence)
320 return LOCK_INTERRUPTED;
321
322 if (!current.write_lock)
323 return LOCK_MISUSE;
324
325 want = current;
326 want.sequence++;
327 } while (!cas(ptr, &current, want));
328
329 lock->sequence = current.sequence;
330 return LOCK_OK;
331}
332enum lock_result lock_wr_unlock(void *ptr, struct lock lock) {
333 struct lock current = load(ptr);
334 struct lock want;
335 do {
336 if (current.sequence != lock.sequence)
337 return LOCK_INTERRUPTED;
338
339 if (!current.write_lock)
340 return LOCK_MISUSE;
341
342
343 }
344}
345
346enum lock_result lock_wr_lock_many(void *ptr, bitarray dead_owners_out, size_t count, int dead_threshold_ms);
347enum lock_result lock_wr_refresh_many(void *ptr, bitarray mask, size_t count);
348enum lock_result lock_wr_unlock_many(void *ptr, size_t count);
349
350enum lock_result lock_ro_lock(const uint8_t *ptr, struct lock *lock_out, int dead_threshold_ms) {
351 struct lock lock = load(ptr);
352 int timeout = dead_threshold_ms;
353
354 while (lock.blocked || lock.write_lock) {
355 const enum lock_result res = lock_wait(ptr, lock, &timeout);
356 if (res != LOCK_OK && res != LOCK_TIMEOUT) return res;
357
358 const struct lock new = load(ptr);
359 if (new.write_lock != lock.write_lock || new.generation != lock.generation)
360 timeout = dead_threshold_ms;
361 else if (res == LOCK_TIMEOUT)
362 return res;
363
364 lock = new;
365 }
366
367 *lock_out = lock;
368 return LOCK_OK;
369}
370enum lock_result lock_ro_unlock(const uint8_t *ptr, struct lock lock) {
371 const struct lock current = load(ptr);
372 if (current.generation != lock.generation) {
373 return LOCK_INTERRUPTED;
374 }
375 return LOCK_OK;
376}
377
378enum lock_result lock_rd_lock(uint8_t *ptr, struct lock *lock_out, int dead_threshold_ms) {
379 struct lock lock = load(ptr);
380 struct lock want;
381 int timeout = dead_threshold_ms;
382 do {
383 while (lock.blocked || lock.write_lock || lock.read_locks == READ_LOCKS_MAX) {
384 const enum lock_result res = lock_wait(ptr, lock, &timeout);
385 if (res != LOCK_OK) return res;
386
387 want = load(ptr);
388 if (want.)
389
390 lock = want;
391 }
392
393 want = lock;
394 want.read_locks++;
395 } while (!cas(ptr, &lock, want));
396 *lock_out = want;
397 return LOCK_OK;
398}
399*/
clod_rwseq_result
Definition rwseq.h:12
@ CLOD_RWSEQ_DEAD
Definition rwseq.h:39
@ CLOD_RWSEQ_INTERRUPTED
Definition rwseq.h:48
@ CLOD_RWSEQ_OK
No worries at all!
Definition rwseq.h:14
@ CLOD_RWSEQ_MISUSE
Definition rwseq.h:20
Definition lock.c:21