1#[cfg(not(feature = "deadlock_detection"))]
6mod mutex_impl {
7 #[derive(Default)]
13 pub struct Mutex<T>(parking_lot::Mutex<T>);
14
15 pub use parking_lot::MutexGuard;
17
18 impl<T> Mutex<T> {
19 #[inline(always)]
20 pub fn new(val: T) -> Self {
21 Self(parking_lot::Mutex::new(val))
22 }
23
24 #[inline(always)]
25 pub fn lock(&self) -> MutexGuard<'_, T> {
26 self.0.lock()
27 }
28 }
29}
30
#[cfg(feature = "deadlock_detection")]
mod mutex_impl {
    /// A mutex that panics (instead of deadlocking) when the same thread
    /// tries to lock it recursively. Only used with the
    /// `deadlock_detection` feature.
    #[derive(Default)]
    pub struct Mutex<T>(parking_lot::Mutex<T>);

    /// Guard returned by [`Mutex::lock`]. The second field is the address
    /// of the wrapped mutex, used as the lock's identity; on drop, it is
    /// unregistered from the thread-local set of held locks.
    pub struct MutexGuard<'a, T>(parking_lot::MutexGuard<'a, T>, *const ());

    /// The set of lock addresses currently held by one thread.
    #[derive(Default)]
    struct HeldLocks(Vec<*const ()>);

    impl HeldLocks {
        /// Register a lock as held; panics if this thread already holds it.
        #[inline(always)]
        fn insert(&mut self, lock: *const ()) {
            // Detect "same thread locking the same lock twice", which
            // would otherwise deadlock (parking_lot mutexes are not reentrant):
            assert!(
                !self.0.contains(&lock),
                "Recursively locking a Mutex in the same thread is not supported"
            );
            self.0.push(lock);
        }

        /// Unregister a lock (all occurrences of its address).
        #[inline(always)]
        fn remove(&mut self, lock: *const ()) {
            self.0.retain(|&ptr| ptr != lock);
        }
    }

    thread_local! {
        // Per-thread set of locks currently held by that thread.
        static HELD_LOCKS_TLS: std::cell::RefCell<HeldLocks> = Default::default();
    }

    impl<T> Mutex<T> {
        /// Create a new mutex wrapping `val`.
        #[inline(always)]
        pub fn new(val: T) -> Self {
            Self(parking_lot::Mutex::new(val))
        }

        /// Lock the mutex, panicking if this thread already holds it.
        pub fn lock(&self) -> MutexGuard<'_, T> {
            // Use the address of the inner mutex as a unique id for this lock:
            let ptr = std::ptr::from_ref::<parking_lot::Mutex<_>>(&self.0).cast::<()>();

            // Register BEFORE blocking on the lock so that a recursive
            // lock attempt panics here instead of deadlocking below:
            HELD_LOCKS_TLS.with(|held_locks| {
                held_locks.borrow_mut().insert(ptr);
            });

            MutexGuard(self.0.lock(), ptr)
        }

        /// Consume the mutex, returning the wrapped value.
        #[inline(always)]
        pub fn into_inner(self) -> T {
            self.0.into_inner()
        }
    }

    impl<T> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
            // Unregister the lock from this thread's held-lock set.
            // (The raw pointer makes the guard `!Send`, so the guard is
            // dropped on the thread that acquired it.)
            let ptr = self.1;
            HELD_LOCKS_TLS.with(|held_locks| {
                held_locks.borrow_mut().remove(ptr);
            });
        }
    }

    impl<T> std::ops::Deref for MutexGuard<'_, T> {
        type Target = T;

        #[inline(always)]
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl<T> std::ops::DerefMut for MutexGuard<'_, T> {
        #[inline(always)]
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }
}
119
120#[cfg(not(feature = "deadlock_detection"))]
123mod rw_lock_impl {
124 pub use parking_lot::MappedRwLockReadGuard as RwLockReadGuard;
126
127 pub use parking_lot::MappedRwLockWriteGuard as RwLockWriteGuard;
129
130 #[derive(Default)]
136 pub struct RwLock<T: ?Sized>(parking_lot::RwLock<T>);
137
138 impl<T> RwLock<T> {
139 #[inline(always)]
140 pub fn new(val: T) -> Self {
141 Self(parking_lot::RwLock::new(val))
142 }
143 }
144
145 impl<T: ?Sized> RwLock<T> {
146 #[inline(always)]
147 pub fn read(&self) -> RwLockReadGuard<'_, T> {
148 parking_lot::RwLockReadGuard::map(self.0.read(), |v| v)
149 }
150
151 #[inline(always)]
152 pub fn write(&self) -> RwLockWriteGuard<'_, T> {
153 parking_lot::RwLockWriteGuard::map(self.0.write(), |v| v)
154 }
155 }
156}
157
#[cfg(feature = "deadlock_detection")]
mod rw_lock_impl {
    use std::{
        ops::{Deref, DerefMut},
        sync::Arc,
        thread::ThreadId,
    };

    use ahash::HashMap;
    use parking_lot::{MappedRwLockReadGuard, MappedRwLockWriteGuard};

    /// Read guard that unregisters the current thread from the lock's
    /// holder map when dropped.
    pub struct RwLockReadGuard<'a, T> {
        // `Option` only so that `map` can move the inner guard out while
        // this wrapper (which has a `Drop` impl) is still alive.
        // Invariant: `Some` except transiently inside `map`.
        guard: Option<MappedRwLockReadGuard<'a, T>>,
        // Shared with the owning `RwLock`: which threads hold the lock,
        // and an (unresolved) backtrace of where each acquired it.
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<'a, T> RwLockReadGuard<'a, T> {
        /// Project the guard to a component of the locked data,
        /// mirroring [`parking_lot::RwLockReadGuard::map`].
        ///
        /// NOTE(review): `s` is dropped at the end of this call with
        /// `guard == None`, and its `Drop` still removes this thread from
        /// `holders` — so after `map` the thread is no longer registered
        /// as a holder even though the mapped guard is alive. Detection
        /// is best-effort; confirm this is acceptable.
        #[inline]
        pub fn map<U, F>(mut s: Self, f: F) -> RwLockReadGuard<'a, U>
        where
            F: FnOnce(&T) -> &U,
        {
            RwLockReadGuard {
                guard: s
                    .guard
                    .take()
                    .map(|g| parking_lot::MappedRwLockReadGuard::map(g, f)),
                holders: Arc::clone(&s.holders),
            }
        }
    }

    impl<T> Deref for RwLockReadGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            // Safe to unwrap: `guard` is only `None` inside `map`.
            self.guard.as_ref().unwrap()
        }
    }

    impl<T> Drop for RwLockReadGuard<'_, T> {
        fn drop(&mut self) {
            // Unregister this thread as a holder.
            // NOTE(review): if the same thread holds several read guards,
            // dropping the first one already removes the thread's entry,
            // weakening detection for the remaining guards — confirm.
            let tid = std::thread::current().id();
            self.holders.lock().remove(&tid);
        }
    }

    /// Write guard that unregisters the current thread from the lock's
    /// holder map when dropped.
    pub struct RwLockWriteGuard<'a, T> {
        // Same `Option`/`Drop` dance as `RwLockReadGuard`; see above.
        guard: Option<MappedRwLockWriteGuard<'a, T>>,
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<'a, T> RwLockWriteGuard<'a, T> {
        /// Project the guard to a component of the locked data,
        /// mirroring [`parking_lot::RwLockWriteGuard::map`].
        /// See the note on [`RwLockReadGuard::map`] about holder bookkeeping.
        #[inline]
        pub fn map<U, F>(mut s: Self, f: F) -> RwLockWriteGuard<'a, U>
        where
            F: FnOnce(&mut T) -> &mut U,
        {
            RwLockWriteGuard {
                guard: s
                    .guard
                    .take()
                    .map(|g| parking_lot::MappedRwLockWriteGuard::map(g, f)),
                holders: Arc::clone(&s.holders),
            }
        }
    }

    impl<T> Deref for RwLockWriteGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            // Safe to unwrap: `guard` is only `None` inside `map`.
            self.guard.as_ref().unwrap()
        }
    }

    impl<T> DerefMut for RwLockWriteGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            self.guard.as_mut().unwrap()
        }
    }

    impl<T> Drop for RwLockWriteGuard<'_, T> {
        fn drop(&mut self) {
            // Unregister this thread as a holder (see note on the read guard).
            let tid = std::thread::current().id();
            self.holders.lock().remove(&tid);
        }
    }

    /// A read-write lock that panics with a backtrace when the same thread
    /// would deadlock by re-locking it incompatibly (read-after-write,
    /// write-after-read, write-after-write). Only used with the
    /// `deadlock_detection` feature.
    #[derive(Default)]
    pub struct RwLock<T> {
        lock: parking_lot::RwLock<T>,
        // Threads currently holding (or about to hold) this lock, with a
        // backtrace of where each acquired it. Shared with the guards via
        // `Arc` so drops can unregister.
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<T> RwLock<T> {
        /// Create a new lock wrapping `val`.
        pub fn new(val: T) -> Self {
            Self {
                lock: parking_lot::RwLock::new(val),
                holders: Default::default(),
            }
        }

        /// Acquire a shared read lock.
        ///
        /// Panics if this thread already holds the lock exclusively
        /// (a read-after-write from the same thread would deadlock).
        pub fn read(&self) -> RwLockReadGuard<'_, T> {
            let tid = std::thread::current().id();

            // Would deadlock only if *we* are the exclusive holder:
            // someone holds a write lock AND this thread is registered.
            let would_deadlock =
                self.lock.is_locked_exclusive() && self.holders.lock().contains_key(&tid);
            assert!(
                !would_deadlock,
                "{} DEAD-LOCK DETECTED ({:?})!\n\
                    Trying to grab read-lock at:\n{}\n\
                    which is already exclusively held by current thread at:\n{}\n\n",
                std::any::type_name::<Self>(),
                tid,
                format_backtrace(&mut make_backtrace()),
                format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
            );

            // Register BEFORE blocking, recording where we took the lock.
            // `or_insert_with` keeps the earliest backtrace if the thread
            // is already registered (e.g. read-read reentrancy).
            self.holders
                .lock()
                .entry(tid)
                .or_insert_with(make_backtrace);

            RwLockReadGuard {
                guard: parking_lot::RwLockReadGuard::map(self.lock.read(), |v| v).into(),
                holders: Arc::clone(&self.holders),
            }
        }

        /// Acquire an exclusive write lock.
        ///
        /// Panics if this thread already holds the lock in *any* mode
        /// (write-after-read and write-after-write both deadlock).
        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
            let tid = std::thread::current().id();

            // Any existing hold by this thread (shared or exclusive)
            // would deadlock a write attempt:
            let would_deadlock = self.lock.is_locked() && self.holders.lock().contains_key(&tid);
            assert!(
                !would_deadlock,
                "{} DEAD-LOCK DETECTED ({:?})!\n\
                    Trying to grab write-lock at:\n{}\n\
                    which is already held by current thread at:\n{}\n\n",
                std::any::type_name::<Self>(),
                tid,
                format_backtrace(&mut make_backtrace()),
                format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
            );

            // Register BEFORE blocking, recording where we took the lock.
            self.holders
                .lock()
                .entry(tid)
                .or_insert_with(make_backtrace);

            RwLockWriteGuard {
                guard: parking_lot::RwLockWriteGuard::map(self.lock.write(), |v| v).into(),
                holders: Arc::clone(&self.holders),
            }
        }

        /// Consume the lock, returning the wrapped value.
        #[inline(always)]
        pub fn into_inner(self) -> T {
            self.lock.into_inner()
        }
    }

    /// Capture a backtrace cheaply; symbol resolution is deferred to
    /// [`format_backtrace`], which only runs on the failure path.
    fn make_backtrace() -> backtrace::Backtrace {
        backtrace::Backtrace::new_unresolved()
    }

    /// Resolve and pretty-print a backtrace, trimming uninteresting
    /// frames above `make_backtrace` and below the test-harness entry.
    fn format_backtrace(backtrace: &mut backtrace::Backtrace) -> String {
        backtrace.resolve();

        let stacktrace = format!("{backtrace:?}");

        // Cut everything from the runtime's short-backtrace marker down:
        let end_offset = stacktrace
            .find("std::sys_common::backtrace::__rust_begin_short_backtrace")
            .unwrap_or(stacktrace.len());
        let stacktrace = &stacktrace[..end_offset];

        // Cut everything up to (and including) our own capture frame:
        let first_interesting_function = "epaint::mutex::rw_lock_impl::make_backtrace\n";
        if let Some(start_offset) = stacktrace.find(first_interesting_function) {
            stacktrace[start_offset + first_interesting_function.len()..].to_owned()
        } else {
            stacktrace.to_owned()
        }
    }
}
360
361pub use mutex_impl::{Mutex, MutexGuard};
364pub use rw_lock_impl::{RwLock, RwLockReadGuard, RwLockWriteGuard};
365
366impl<T> Clone for Mutex<T>
367where
368 T: Clone,
369{
370 fn clone(&self) -> Self {
371 Self::new(self.lock().clone())
372 }
373}
374
#[cfg(test)]
mod tests {
    #![allow(clippy::disallowed_methods)]

    use std::time::Duration;

    use crate::mutex::Mutex;

    /// Locking two *different* mutexes on one thread must never trip
    /// the recursion detector.
    #[test]
    fn lock_two_different_mutexes_single_thread() {
        let first = Mutex::new(());
        let second = Mutex::new(());
        let _guard_a = first.lock();
        let _guard_b = second.lock();
    }

    /// Contention from another thread must block (not panic) and resolve
    /// once the first guard is released.
    #[test]
    fn lock_multiple_threads() {
        use std::sync::Arc;
        let shared = Arc::new(Mutex::new(()));
        let main_guard = shared.lock();
        let background = {
            let shared = Arc::clone(&shared);
            std::thread::spawn(move || {
                let _guard = shared.lock();
            })
        };
        // Give the spawned thread time to start blocking on the lock,
        // then release so it can finish.
        std::thread::sleep(Duration::from_millis(200));
        drop(main_guard);
        background.join().unwrap();
    }
}
408
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "deadlock_detection")]
#[cfg(test)]
mod tests_rwlock {
    #![allow(clippy::disallowed_methods)]
    use crate::mutex::RwLock;
    use std::time::Duration;

    /// Two *different* locks on one thread must not trigger detection.
    #[test]
    fn lock_two_different_rwlocks_single_thread() {
        let one = RwLock::new(());
        let two = RwLock::new(());
        let _a = one.write();
        let _b = two.write();
    }

    /// Cross-thread contention must block (not panic) and resolve once
    /// the main thread releases its write lock.
    #[test]
    fn rwlock_multiple_threads() {
        use std::sync::Arc;
        let one = Arc::new(RwLock::new(()));
        let our_lock = one.write();
        let other_thread1 = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                // `let _ = …` drops the guard immediately after acquiring.
                let _ = one.write();
            })
        };
        let other_thread2 = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                let _ = one.read();
            })
        };
        // Give both threads time to start blocking, then release:
        std::thread::sleep(Duration::from_millis(200));
        drop(our_lock);
        other_thread1.join().unwrap();
        other_thread2.join().unwrap();
    }

    /// write-then-write on the same thread must panic, not deadlock.
    #[test]
    #[should_panic]
    fn rwlock_write_write_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.write();
        let _a2 = one.write(); // detected: write-after-write
    }

    /// write-then-read on the same thread must panic, not deadlock.
    #[test]
    #[should_panic]
    fn rwlock_write_read_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.write();
        let _a2 = one.read(); // detected: read-after-write
    }

    /// read-then-write on the same thread must panic, not deadlock.
    #[test]
    #[should_panic]
    fn rwlock_read_write_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.read();
        let _a2 = one.write(); // detected: write-after-read
    }

    /// read-then-read on the same thread is allowed (shared lock).
    #[test]
    fn rwlock_read_read_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.read();
        let _a2 = one.read();
    }

    /// A foreign thread's finished read must not poison detection:
    /// after our own read guard is dropped, a write must succeed.
    #[test]
    fn rwlock_short_read_foreign_read_write_reentrancy() {
        use std::sync::Arc;

        let lock = Arc::new(RwLock::new(()));

        let t0r0 = lock.read();

        let other_thread = {
            let lock = Arc::clone(&lock);
            std::thread::spawn(move || {
                let _t1r0 = lock.read();
            })
        };
        other_thread.join().unwrap();

        // Release our read before writing — no self-deadlock:
        drop(t0r0);

        let _t0w0 = lock.write();
    }

    /// Holding our own read while writing must panic, even if another
    /// thread also took (and released) a read in between.
    #[test]
    #[should_panic]
    fn rwlock_read_foreign_read_write_reentrancy() {
        use std::sync::Arc;

        let lock = Arc::new(RwLock::new(()));

        let _t0r0 = lock.read();

        let other_thread = {
            let lock = Arc::clone(&lock);
            std::thread::spawn(move || {
                let _t1r0 = lock.read();
            })
        };
        other_thread.join().unwrap();

        let _t0w0 = lock.write(); // detected: write-after-read on this thread
    }
}