epaint/
mutex.rs

1//! Helper module that adds extra checks when the `deadlock_detection` feature is turned on.
2
3// ----------------------------------------------------------------------------
4
5#[cfg(not(feature = "deadlock_detection"))]
6mod mutex_impl {
7    /// Provides interior mutability.
8    ///
9    /// This is a thin wrapper around [`parking_lot::Mutex`], except if
10    /// the feature `deadlock_detection` is turned enabled, in which case
11    /// extra checks are added to detect deadlocks.
12    #[derive(Default)]
13    pub struct Mutex<T>(parking_lot::Mutex<T>);
14
15    /// The lock you get from [`Mutex`].
16    pub use parking_lot::MutexGuard;
17
18    impl<T> Mutex<T> {
19        #[inline(always)]
20        pub fn new(val: T) -> Self {
21            Self(parking_lot::Mutex::new(val))
22        }
23
24        #[inline(always)]
25        pub fn lock(&self) -> MutexGuard<'_, T> {
26            self.0.lock()
27        }
28    }
29}
30
#[cfg(feature = "deadlock_detection")]
mod mutex_impl {
    /// Provides interior mutability.
    ///
    /// This is a thin wrapper around [`parking_lot::Mutex`], except if
    /// the feature `deadlock_detection` is enabled, in which case
    /// extra checks are added to detect deadlocks.
    #[derive(Default)]
    pub struct Mutex<T>(parking_lot::Mutex<T>);

    /// The lock you get from [`Mutex`].
    ///
    /// Bundles the underlying [`parking_lot::MutexGuard`] with the address of
    /// the locked mutex, so the lock can be unregistered from the thread-local
    /// held-locks set in `Drop`.
    pub struct MutexGuard<'a, T>(parking_lot::MutexGuard<'a, T>, *const ());

    /// The set of mutexes currently locked by the current thread.
    ///
    /// Each lock is identified by the address of its inner
    /// `parking_lot::Mutex`; the pointers are never dereferenced.
    #[derive(Default)]
    struct HeldLocks(Vec<*const ()>);

    impl HeldLocks {
        /// Register `lock` as held; panics if this thread already holds it.
        #[inline(always)]
        fn insert(&mut self, lock: *const ()) {
            // Very few locks will ever be held at the same time, so a linear search is fast
            assert!(
                !self.0.contains(&lock),
                "Recursively locking a Mutex in the same thread is not supported"
            );
            self.0.push(lock);
        }

        /// Unregister `lock`. `insert` forbids duplicates, so `retain`
        /// removes exactly the one matching entry.
        #[inline(always)]
        fn remove(&mut self, lock: *const ()) {
            self.0.retain(|&ptr| ptr != lock);
        }
    }

    thread_local! {
        // Thread-local, so each thread tracks only the locks *it* holds;
        // cross-thread contention is handled by parking_lot as usual.
        static HELD_LOCKS_TLS: std::cell::RefCell<HeldLocks> = Default::default();
    }

    impl<T> Mutex<T> {
        /// Wrap `val` in a new, unlocked mutex.
        #[inline(always)]
        pub fn new(val: T) -> Self {
            Self(parking_lot::Mutex::new(val))
        }

        /// Lock the mutex, panicking (instead of deadlocking) if the current
        /// thread already holds it.
        pub fn lock(&self) -> MutexGuard<'_, T> {
            // Detect if we are recursively taking out a lock on this mutex.

            // use a pointer to the inner data as an id for this lock
            let ptr = std::ptr::from_ref::<parking_lot::Mutex<_>>(&self.0).cast::<()>();

            // Store it in thread local storage while we have a lock guard taken out.
            // NOTE: this must run BEFORE `self.0.lock()` below — on a recursive
            // call the real lock would block forever, so the check has to fire first.
            HELD_LOCKS_TLS.with(|held_locks| {
                held_locks.borrow_mut().insert(ptr);
            });

            MutexGuard(self.0.lock(), ptr)
        }

        /// Consume the mutex, returning the underlying data.
        #[inline(always)]
        pub fn into_inner(self) -> T {
            self.0.into_inner()
        }
    }

    impl<T> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
            // Unregister this lock from the thread's held-locks set,
            // mirroring the `insert` done in `Mutex::lock`.
            let ptr = self.1;
            HELD_LOCKS_TLS.with(|held_locks| {
                held_locks.borrow_mut().remove(ptr);
            });
        }
    }

    impl<T> std::ops::Deref for MutexGuard<'_, T> {
        type Target = T;

        #[inline(always)]
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl<T> std::ops::DerefMut for MutexGuard<'_, T> {
        #[inline(always)]
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }
}
119
120// ----------------------------------------------------------------------------
121
122#[cfg(not(feature = "deadlock_detection"))]
123mod rw_lock_impl {
124    /// The lock you get from [`RwLock::read`].
125    pub use parking_lot::MappedRwLockReadGuard as RwLockReadGuard;
126
127    /// The lock you get from [`RwLock::write`].
128    pub use parking_lot::MappedRwLockWriteGuard as RwLockWriteGuard;
129
130    /// Provides interior mutability.
131    ///
132    /// This is a thin wrapper around [`parking_lot::RwLock`], except if
133    /// the feature `deadlock_detection` is turned enabled, in which case
134    /// extra checks are added to detect deadlocks.
135    #[derive(Default)]
136    pub struct RwLock<T: ?Sized>(parking_lot::RwLock<T>);
137
138    impl<T> RwLock<T> {
139        #[inline(always)]
140        pub fn new(val: T) -> Self {
141            Self(parking_lot::RwLock::new(val))
142        }
143    }
144
145    impl<T: ?Sized> RwLock<T> {
146        #[inline(always)]
147        pub fn read(&self) -> RwLockReadGuard<'_, T> {
148            parking_lot::RwLockReadGuard::map(self.0.read(), |v| v)
149        }
150
151        #[inline(always)]
152        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
153            parking_lot::RwLockWriteGuard::map(self.0.write(), |v| v)
154        }
155    }
156}
157
#[cfg(feature = "deadlock_detection")]
mod rw_lock_impl {
    use std::{
        ops::{Deref, DerefMut},
        sync::Arc,
        thread::ThreadId,
    };

    use ahash::HashMap;
    use parking_lot::{MappedRwLockReadGuard, MappedRwLockWriteGuard};

    /// The lock you get from [`RwLock::read`].
    pub struct RwLockReadGuard<'a, T> {
        // The option is used only because we need to `take()` the guard out of self
        // when doing remappings (`map()`), i.e. it's used as a safe `ManuallyDrop`.
        guard: Option<MappedRwLockReadGuard<'a, T>>,
        // Shared with the owning `RwLock`; this thread's entry is removed on drop.
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<'a, T> RwLockReadGuard<'a, T> {
        /// Re-map the guard to a component of the guarded data,
        /// like [`parking_lot::RwLockReadGuard::map`].
        #[inline]
        pub fn map<U, F>(mut s: Self, f: F) -> RwLockReadGuard<'a, U>
        where
            F: FnOnce(&T) -> &U,
        {
            RwLockReadGuard {
                guard: s
                    .guard
                    .take()
                    .map(|g| parking_lot::MappedRwLockReadGuard::map(g, f)),
                holders: Arc::clone(&s.holders),
            }
        }
    }

    impl<T> Deref for RwLockReadGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            // `guard` is only `None` transiently inside `map()`,
            // so the unwrap cannot fire here.
            self.guard.as_ref().unwrap()
        }
    }

    impl<T> Drop for RwLockReadGuard<'_, T> {
        fn drop(&mut self) {
            // Unregister this thread as a holder.
            // NOTE(review): this removes the entry even if the thread still holds
            // another (reentrant) read-lock — consistent with the "only the first
            // backtrace" simplification documented on `RwLock::holders`.
            let tid = std::thread::current().id();
            self.holders.lock().remove(&tid);
        }
    }

    /// The lock you get from [`RwLock::write`].
    pub struct RwLockWriteGuard<'a, T> {
        // The option is used only because we need to `take()` the guard out of self
        // when doing remappings (`map()`), i.e. it's used as a safe `ManuallyDrop`.
        guard: Option<MappedRwLockWriteGuard<'a, T>>,
        // Shared with the owning `RwLock`; this thread's entry is removed on drop.
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<'a, T> RwLockWriteGuard<'a, T> {
        /// Re-map the guard to a component of the guarded data,
        /// like [`parking_lot::RwLockWriteGuard::map`].
        #[inline]
        pub fn map<U, F>(mut s: Self, f: F) -> RwLockWriteGuard<'a, U>
        where
            F: FnOnce(&mut T) -> &mut U,
        {
            RwLockWriteGuard {
                guard: s
                    .guard
                    .take()
                    .map(|g| parking_lot::MappedRwLockWriteGuard::map(g, f)),
                holders: Arc::clone(&s.holders),
            }
        }
    }

    impl<T> Deref for RwLockWriteGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            // `guard` is only `None` transiently inside `map()`.
            self.guard.as_ref().unwrap()
        }
    }

    impl<T> DerefMut for RwLockWriteGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            // `guard` is only `None` transiently inside `map()`.
            self.guard.as_mut().unwrap()
        }
    }

    impl<T> Drop for RwLockWriteGuard<'_, T> {
        fn drop(&mut self) {
            // Unregister this thread as a holder (see note on the read guard's Drop).
            let tid = std::thread::current().id();
            self.holders.lock().remove(&tid);
        }
    }

    /// Provides interior mutability.
    ///
    /// This is a thin wrapper around [`parking_lot::RwLock`], except if
    /// the feature `deadlock_detection` is enabled, in which case
    /// extra checks are added to detect deadlocks.
    #[derive(Default)]
    pub struct RwLock<T> {
        lock: parking_lot::RwLock<T>,
        // Technically we'd need a list of backtraces per thread-id since parking_lot's
        // read-locks are reentrant.
        // In practice it's not that useful to have the whole list though, so we only
        // keep track of the first backtrace for now.
        holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
    }

    impl<T> RwLock<T> {
        /// Wrap `val` in a new, unlocked lock with an empty holder map.
        pub fn new(val: T) -> Self {
            Self {
                lock: parking_lot::RwLock::new(val),
                holders: Default::default(),
            }
        }

        /// Take a shared read-lock, panicking if this thread already holds the
        /// write-lock (which would otherwise deadlock).
        pub fn read(&self) -> RwLockReadGuard<'_, T> {
            let tid = std::thread::current().id();

            // If it is write-locked, and we locked it (reentrancy deadlock).
            // Best-effort: the check and the actual `self.lock.read()` below are
            // separate steps, so the lock state can change in between.
            let would_deadlock =
                self.lock.is_locked_exclusive() && self.holders.lock().contains_key(&tid);
            assert!(
                !would_deadlock,
                // The format arguments (including the second `holders.lock()`) are
                // only evaluated when the assertion fails, so they cannot
                // self-deadlock on the happy path.
                "{} DEAD-LOCK DETECTED ({:?})!\n\
                    Trying to grab read-lock at:\n{}\n\
                    which is already exclusively held by current thread at:\n{}\n\n",
                std::any::type_name::<Self>(),
                tid,
                format_backtrace(&mut make_backtrace()),
                format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
            );

            // Record where this thread first grabbed the lock
            // (kept until the guard is dropped).
            self.holders
                .lock()
                .entry(tid)
                .or_insert_with(make_backtrace);

            RwLockReadGuard {
                guard: parking_lot::RwLockReadGuard::map(self.lock.read(), |v| v).into(),
                holders: Arc::clone(&self.holders),
            }
        }

        /// Take an exclusive write-lock, panicking if this thread already holds
        /// any lock on it (read or write).
        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
            let tid = std::thread::current().id();

            // If it is locked in any way, and we locked it (reentrancy deadlock).
            // Best-effort, same caveat as in `read()`.
            let would_deadlock = self.lock.is_locked() && self.holders.lock().contains_key(&tid);
            assert!(
                !would_deadlock,
                "{} DEAD-LOCK DETECTED ({:?})!\n\
                    Trying to grab write-lock at:\n{}\n\
                    which is already held by current thread at:\n{}\n\n",
                std::any::type_name::<Self>(),
                tid,
                format_backtrace(&mut make_backtrace()),
                format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
            );

            // Record where this thread first grabbed the lock.
            self.holders
                .lock()
                .entry(tid)
                .or_insert_with(make_backtrace);

            RwLockWriteGuard {
                guard: parking_lot::RwLockWriteGuard::map(self.lock.write(), |v| v).into(),
                holders: Arc::clone(&self.holders),
            }
        }

        /// Consume the lock, returning the underlying data.
        #[inline(always)]
        pub fn into_inner(self) -> T {
            self.lock.into_inner()
        }
    }

    /// Capture an unresolved backtrace (cheap); symbol resolution is deferred
    /// to `format_backtrace`, which only runs on the failure path.
    fn make_backtrace() -> backtrace::Backtrace {
        backtrace::Backtrace::new_unresolved()
    }

    /// Resolve and pretty-print a backtrace, trimming frames below
    /// `make_backtrace` and above the short-backtrace marker.
    fn format_backtrace(backtrace: &mut backtrace::Backtrace) -> String {
        backtrace.resolve();

        let stacktrace = format!("{backtrace:?}");

        // Remove irrelevant parts of the stacktrace:
        let end_offset = stacktrace
            .find("std::sys_common::backtrace::__rust_begin_short_backtrace")
            .unwrap_or(stacktrace.len());
        let stacktrace = &stacktrace[..end_offset];

        let first_interesting_function = "epaint::mutex::rw_lock_impl::make_backtrace\n";
        if let Some(start_offset) = stacktrace.find(first_interesting_function) {
            stacktrace[start_offset + first_interesting_function.len()..].to_owned()
        } else {
            stacktrace.to_owned()
        }
    }
}
360
361// ----------------------------------------------------------------------------
362
363pub use mutex_impl::{Mutex, MutexGuard};
364pub use rw_lock_impl::{RwLock, RwLockReadGuard, RwLockWriteGuard};
365
366impl<T> Clone for Mutex<T>
367where
368    T: Clone,
369{
370    fn clone(&self) -> Self {
371        Self::new(self.lock().clone())
372    }
373}
374
375// ----------------------------------------------------------------------------
376
#[cfg(test)]
mod tests {
    #![allow(clippy::disallowed_methods)] // Ok for tests

    use crate::mutex::Mutex;
    use std::time::Duration;

    /// Holding guards of two *different* mutexes on one thread must be allowed.
    #[test]
    fn lock_two_different_mutexes_single_thread() {
        let first = Mutex::new(());
        let second = Mutex::new(());
        let _guard_a = first.lock();
        let _guard_b = second.lock();
    }

    /// Two threads contending for the same mutex: the second blocks until
    /// the first releases, then proceeds.
    #[test]
    fn lock_multiple_threads() {
        use std::sync::Arc;
        let shared = Arc::new(Mutex::new(()));
        let main_guard = shared.lock();
        let background = {
            let shared = Arc::clone(&shared);
            std::thread::spawn(move || {
                let _guard = shared.lock();
            })
        };
        // Give the background thread time to start blocking on the lock.
        std::thread::sleep(Duration::from_millis(200));
        drop(main_guard);
        background.join().unwrap();
    }
}
408
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "deadlock_detection")]
#[cfg(test)]
mod tests_rwlock {
    #![allow(clippy::disallowed_methods)] // Ok for tests

    use crate::mutex::RwLock;
    use std::time::Duration;

    /// Write-locking two *different* locks on one thread must be allowed.
    #[test]
    fn lock_two_different_rwlocks_single_thread() {
        let one = RwLock::new(());
        let two = RwLock::new(());
        let _a = one.write();
        let _b = two.write();
    }

    /// Cross-thread contention (not reentrancy) must NOT trigger the
    /// deadlock detector: other threads simply block until we release.
    #[test]
    fn rwlock_multiple_threads() {
        use std::sync::Arc;
        let one = Arc::new(RwLock::new(()));
        let our_lock = one.write();
        let other_thread1 = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                // `let _ = …` drops the guard immediately after acquiring.
                let _ = one.write();
            })
        };
        let other_thread2 = {
            let one = Arc::clone(&one);
            std::thread::spawn(move || {
                let _ = one.read();
            })
        };
        // Give both threads time to start blocking on the lock.
        std::thread::sleep(Duration::from_millis(200));
        drop(our_lock);
        other_thread1.join().unwrap();
        other_thread2.join().unwrap();
    }

    /// write-then-write on the same thread is a reentrancy deadlock.
    #[test]
    #[should_panic]
    fn rwlock_write_write_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.write();
        let _a2 = one.write(); // panics
    }

    /// write-then-read on the same thread is a reentrancy deadlock.
    #[test]
    #[should_panic]
    fn rwlock_write_read_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.write();
        let _a2 = one.read(); // panics
    }

    /// read-then-write on the same thread is a reentrancy deadlock.
    #[test]
    #[should_panic]
    fn rwlock_read_write_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.read();
        let _a2 = one.write(); // panics
    }

    #[test]
    fn rwlock_read_read_reentrancy() {
        let one = RwLock::new(());
        let _a1 = one.read();
        // This is legal: this test suite specifically targets native, which relies
        // on parking_lot's rw-locks, which are reentrant.
        let _a2 = one.read();
    }

    /// A *released* read-lock must not poison later write attempts, even if
    /// another thread also took (and released) the read-lock in between.
    #[test]
    fn rwlock_short_read_foreign_read_write_reentrancy() {
        use std::sync::Arc;

        let lock = Arc::new(RwLock::new(()));

        // Thread #0 grabs a read lock
        let t0r0 = lock.read();

        // Thread #1 grabs the same read lock
        let other_thread = {
            let lock = Arc::clone(&lock);
            std::thread::spawn(move || {
                let _t1r0 = lock.read();
            })
        };
        other_thread.join().unwrap();

        // Thread #0 releases its read lock
        drop(t0r0);

        // Thread #0 now grabs a write lock, which is legal
        let _t0w0 = lock.write();
    }

    /// Same as above, but thread #0 keeps its read-lock (`_t0r0` is held to
    /// end of scope), so its own later `write()` is a read→write reentrancy.
    #[test]
    #[should_panic]
    fn rwlock_read_foreign_read_write_reentrancy() {
        use std::sync::Arc;

        let lock = Arc::new(RwLock::new(()));

        // Thread #0 grabs a read lock
        let _t0r0 = lock.read();

        // Thread #1 grabs the same read lock
        let other_thread = {
            let lock = Arc::clone(&lock);
            std::thread::spawn(move || {
                let _t1r0 = lock.read();
            })
        };
        other_thread.join().unwrap();

        // Thread #0 now grabs a write lock, which should panic (read-write)
        let _t0w0 = lock.write(); // panics
    }
}
529}