diff --git a/modules/axsync/src/barrier.rs b/modules/axsync/src/barrier.rs
index 4e4c39492d..355b1a38eb 100644
--- a/modules/axsync/src/barrier.rs
+++ b/modules/axsync/src/barrier.rs
@@ -6,10 +6,13 @@
 //!
 //! Note: [`Barrier`] is not available when the `multitask` feature is disabled.
 
+#[cfg(test)]
+mod tests;
+
 use core::fmt;
 
 use crate::condvar::Condvar;
-use crate::mutex::Mutex;
+use crate::Mutex;
 
 /// A barrier enables multiple threads to synchronize the beginning
 /// of some computation.
@@ -99,16 +102,6 @@ impl BarrierWaitResult {
     /// threads will have `false` returned.
     ///
     /// [`wait`]: struct.Barrier.html#method.wait
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use spin;
-    ///
-    /// let barrier = spin::Barrier::new(1);
-    /// let barrier_wait_result = barrier.wait();
-    /// println!("{:?}", barrier_wait_result.is_leader());
-    /// ```
    pub fn is_leader(&self) -> bool {
         self.0
     }
diff --git a/modules/axsync/src/barrier/tests.rs b/modules/axsync/src/barrier/tests.rs
new file mode 100644
index 0000000000..78316f7654
--- /dev/null
+++ b/modules/axsync/src/barrier/tests.rs
@@ -0,0 +1,102 @@
+use axtask as thread;
+
+use crate::Barrier;
+
+const NUM_TASKS: u32 = 10;
+const NUM_ITERS: u32 = 10_000;
+
+#[test]
+fn test_barrier() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    static BARRIER: Barrier = Barrier::new(NUM_TASKS as usize);
+
+    let mut join_handles = Vec::new();
+
+    fn rendezvous() {
+        for _ in 0..NUM_ITERS {
+            BARRIER.wait();
+        }
+    }
+
+    for _ in 0..NUM_TASKS {
+        join_handles.push(thread::spawn(rendezvous));
+    }
+
+    // Wait for all threads to finish.
+    for join_handle in join_handles {
+        join_handle.join();
+    }
+
+    println!("Barrier test OK");
+}
+
+#[test]
+fn test_wait_result() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    static BARRIER: Barrier = Barrier::new(1);
+
+    // The first thread to call `wait` will be the leader.
+    assert_eq!(BARRIER.wait().is_leader(), true);
+
+    // Since the barrier is reusable, the next thread to call `wait` will also be the leader.
+    assert_eq!(BARRIER.wait().is_leader(), true);
+
+    static BARRIER2: Barrier = Barrier::new(2);
+
+    thread::spawn(|| {
+        assert_eq!(BARRIER2.wait().is_leader(), true);
+    });
+
+    // The first thread to call `wait` won't be the leader.
+    assert_eq!(BARRIER2.wait().is_leader(), false);
+
+    thread::yield_now();
+
+    println!("BarrierWaitResult test OK");
+}
+
+#[test]
+fn test_barrier_wait_result() {
+    use std::sync::mpsc::{channel, TryRecvError};
+    use std::sync::Arc;
+
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let barrier = Arc::new(Barrier::new(NUM_TASKS as _));
+    let (tx, rx) = channel();
+
+    let mut join_handles = Vec::new();
+
+    for _ in 0..NUM_TASKS - 1 {
+        let c = barrier.clone();
+        let tx = tx.clone();
+        join_handles.push(thread::spawn(move || {
+            tx.send(c.wait().is_leader()).unwrap();
+        }));
+    }
+
+    // At this point, all spawned threads should be blocked,
+    // so we shouldn't get anything from the channel.
+    assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));
+
+    let mut leader_found = barrier.wait().is_leader();
+
+    // Wait for all threads to finish.
+    for join_handle in join_handles {
+        join_handle.join();
+    }
+
+    // Now, the barrier is cleared and we should get data.
+    for _ in 0..NUM_TASKS - 1 {
+        if rx.recv().unwrap() {
+            assert!(!leader_found);
+            leader_found = true;
+        }
+    }
+    assert!(leader_found);
+}
diff --git a/modules/axsync/src/condvar/mod.rs b/modules/axsync/src/condvar/mod.rs
index 594f58da21..2775e2ec99 100644
--- a/modules/axsync/src/condvar/mod.rs
+++ b/modules/axsync/src/condvar/mod.rs
@@ -15,8 +15,6 @@ mod multitask;
 #[cfg(feature = "multitask")]
 pub use multitask::Condvar;
 
-
-
 /// A type indicating whether a timed wait on a condition variable returned
 /// due to a time out or not.
 ///
@@ -35,3 +33,6 @@ impl WaitTimeoutResult {
         self.0
     }
 }
+
+#[cfg(test)]
+mod tests;
diff --git a/modules/axsync/src/condvar/no_thread.rs b/modules/axsync/src/condvar/no_thread.rs
index 928801ad9d..6c4f493ea2 100644
--- a/modules/axsync/src/condvar/no_thread.rs
+++ b/modules/axsync/src/condvar/no_thread.rs
@@ -1,7 +1,5 @@
-
-use core::time::Duration;
-
-use crate::Mutex;
+//! Dummy implementation of `Condvar` for single-threaded environments.
+use crate::MutexGuard;
 
 pub struct Condvar {}
 
@@ -17,11 +15,40 @@ impl Condvar {
     #[inline]
     pub fn notify_all(&self) {}
 
-    pub unsafe fn wait(&self, _mutex: &Mutex) {
+    pub fn wait<'a, T>(&self, _guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
         panic!("condvar wait not supported")
     }
 
-    pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
-        panic!("condvar wait not supported");
+    pub fn wait_while<'a, T, F>(
+        &self,
+        _guard: MutexGuard<'a, T>,
+        _condition: F,
+    ) -> MutexGuard<'a, T>
+    where
+        F: FnMut(&mut T) -> bool,
+    {
+        panic!("condvar wait_while not supported")
+    }
+
+    #[cfg(feature = "irq")]
+    pub fn wait_timeout<'a, T>(
+        &self,
+        _guard: MutexGuard<'a, T>,
+        _dur: core::time::Duration,
+    ) -> (MutexGuard<'a, T>, super::WaitTimeoutResult) {
+        panic!("condvar wait_timeout not supported")
+    }
+
+    #[cfg(feature = "irq")]
+    pub fn wait_timeout_while<'a, T, F>(
+        &self,
+        _guard: MutexGuard<'a, T>,
+        _dur: core::time::Duration,
+        _condition: F,
+    ) -> (MutexGuard<'a, T>, super::WaitTimeoutResult)
+    where
+        F: FnMut(&mut T) -> bool,
+    {
+        panic!("condvar wait_timeout_while not supported")
+    }
 }
diff --git a/modules/axsync/src/condvar/tests.rs b/modules/axsync/src/condvar/tests.rs
new file mode 100644
index 0000000000..62335237ac
--- /dev/null
+++ b/modules/axsync/src/condvar/tests.rs
@@ -0,0 +1,175 @@
+use std::sync::{mpsc::channel, Arc};
+
+use axtask as thread;
+
+use crate::{Condvar, Mutex};
+
+const INIT_VALUE: u32 = 0;
+const NUM_TASKS: u32 = 10;
+const NUM_ITERS: u32 = 10_000;
+
+fn may_interrupt() {
+    // simulate interrupts
+    if rand::random::<u32>() % 3 == 0 {
+        thread::yield_now();
+    }
+}
+
+fn inc(delta: u32, pair: Arc<(Mutex<u32>, Condvar)>) {
+    for _ in 0..NUM_ITERS {
+        let (lock, cvar) = &*pair;
+        let mut val = lock.lock();
+        *val += delta;
+        may_interrupt();
+        drop(val);
+        may_interrupt();
+        // We notify the condvar that the value has changed.
+        cvar.notify_one();
+    }
+}
+
+#[test]
+fn test_wait() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let pair = Arc::new((Mutex::new(INIT_VALUE), Condvar::new()));
+    for _ in 0..NUM_TASKS {
+        let pair1 = Arc::clone(&pair);
+        thread::spawn(move || inc(1, pair1));
+        let pair2 = Arc::clone(&pair);
+        thread::spawn(move || inc(2, pair2));
+    }
+
+    // Wait until all spawned tasks have finished their increments.
+    let (lock, cvar) = &*pair;
+    let mut val = lock.lock();
+    // As long as the value inside the `Mutex` has not reached `NUM_ITERS * NUM_TASKS * 3`, we wait.
+    while *val != NUM_ITERS * NUM_TASKS * 3 {
+        may_interrupt();
+        val = cvar.wait(val);
+        may_interrupt();
+    }
+    drop(val);
+
+    assert_eq!(*lock.lock(), NUM_ITERS * NUM_TASKS * 3);
+
+    println!("Condvar wait test OK");
+}
+
+#[test]
+fn test_wait_while() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let pair = Arc::new((Mutex::new(INIT_VALUE), Condvar::new()));
+    for _ in 0..NUM_TASKS {
+        let pair1 = Arc::clone(&pair);
+        thread::spawn(move || inc(1, pair1));
+        let pair2 = Arc::clone(&pair);
+        thread::spawn(move || inc(2, pair2));
+    }
+
+    // Wait until all spawned tasks have finished their increments.
+    let (lock, cvar) = &*pair;
+    // As long as the value inside the `Mutex` has not reached the final value, we wait.
+    let val = cvar.wait_while(lock.lock(), |val| *val != NUM_ITERS * NUM_TASKS * 3);
+
+    assert_eq!(*val, NUM_ITERS * NUM_TASKS * 3);
+
+    println!("Condvar wait_while test OK");
+}
+
+#[test]
+fn smoke() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let c = Condvar::new();
+    c.notify_one();
+    c.notify_all();
+}
+
+#[test]
+fn notify_one() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let m = Arc::new(Mutex::new(()));
+    let m2 = m.clone();
+    let c = Arc::new(Condvar::new());
+    let c2 = c.clone();
+
+    let g = m.lock();
+    let _t = thread::spawn(move || {
+        let _g = m2.lock();
+        c2.notify_one();
+    });
+    let g = c.wait(g);
+    drop(g);
+}
+
+#[test]
+fn notify_all() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let data = Arc::new((Mutex::new(0), Condvar::new()));
+    let (tx, rx) = channel();
+    for _ in 0..NUM_TASKS {
+        let data = data.clone();
+        let tx = tx.clone();
+        thread::spawn(move || {
+            let &(ref lock, ref cond) = &*data;
+            let mut cnt = lock.lock();
+            *cnt += 1;
+            if *cnt == NUM_TASKS {
+                tx.send(()).unwrap();
+            }
+            while *cnt != 0 {
+                cnt = cond.wait(cnt);
+            }
+            tx.send(()).unwrap();
+        });
+    }
+    drop(tx);
+
+    let &(ref lock, ref cond) = &*data;
+    // Yield manually to get tx.send() executed.
+    thread::yield_now();
+    rx.recv().unwrap();
+
+    let mut cnt = lock.lock();
+    *cnt = 0;
+    cond.notify_all();
+    drop(cnt);
+
+    for _ in 0..NUM_TASKS {
+        // Yield manually to get tx.send() executed.
+        thread::yield_now();
+        rx.recv().unwrap();
+    }
+}
+
+#[test]
+fn wait_while() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let pair = Arc::new((Mutex::new(false), Condvar::new()));
+    let pair2 = pair.clone();
+
+    // Spawn a new thread that sets the flag and notifies the condvar.
+    thread::spawn(move || {
+        let &(ref lock, ref cvar) = &*pair2;
+        let mut started = lock.lock();
+        *started = true;
+        // We notify the condvar that the value has changed.
+        cvar.notify_one();
+    });
+
+    // Wait for the thread to start up.
+    let &(ref lock, ref cvar) = &*pair;
+    let guard = cvar.wait_while(lock.lock(), |started| !*started);
+    assert!(*guard);
+}
diff --git a/modules/axsync/src/lib.rs b/modules/axsync/src/lib.rs
index 572657d39d..508a6af7cd 100644
--- a/modules/axsync/src/lib.rs
+++ b/modules/axsync/src/lib.rs
@@ -42,3 +42,13 @@ pub use self::rwlock::{
     MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
 };
 pub use semaphore::Semaphore;
+
+#[cfg(test)]
+mod tests {
+    use std::sync::{Mutex, Once};
+
+    /// Used to initialize the global scheduler exactly once in the test environment.
+    pub static INIT: Once = Once::new();
+    /// Used for serializing the tests in this crate.
+    pub static SEQ: Mutex<()> = Mutex::new(());
+}
diff --git a/modules/axsync/src/mutex.rs b/modules/axsync/src/mutex.rs
index 900f234488..b0a1bf2965 100644
--- a/modules/axsync/src/mutex.rs
+++ b/modules/axsync/src/mutex.rs
@@ -1,5 +1,8 @@
 //! A naïve sleeping mutex.
 
+#[cfg(test)]
+mod tests;
+
 use core::cell::UnsafeCell;
 use core::fmt;
 use core::ops::{Deref, DerefMut};
@@ -209,59 +212,5 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
     }
 }
 pub(crate) fn guard_lock<'a, T: ?Sized>(guard: &MutexGuard<'a, T>) -> &'a Mutex<T> {
-    &guard.lock
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::Mutex;
-    use axtask as thread;
-    use std::sync::Once;
-
-    static INIT: Once = Once::new();
-
-    fn may_interrupt() {
-        // simulate interrupts
-        if rand::random::<u32>() % 3 == 0 {
-            thread::yield_now();
-        }
-    }
-
-    #[test]
-    fn lots_and_lots() {
-        INIT.call_once(thread::init_scheduler);
-
-        const NUM_TASKS: u32 = 10;
-        const NUM_ITERS: u32 = 10_000;
-        static M: Mutex<u32> = Mutex::new(0);
-
-        fn inc(delta: u32) {
-            for _ in 0..NUM_ITERS {
-                let mut val = M.lock();
-                *val += delta;
-                may_interrupt();
-                drop(val);
-                may_interrupt();
-            }
-        }
-
-        for _ in 0..NUM_TASKS {
-            thread::spawn(|| inc(1));
-            thread::spawn(|| inc(2));
-        }
-
-        println!("spawn OK");
-        loop {
-            let val = M.lock();
-            if *val == NUM_ITERS * NUM_TASKS * 3 {
-                break;
-            }
-            may_interrupt();
-            drop(val);
-            may_interrupt();
-        }
-
-        assert_eq!(*M.lock(), NUM_ITERS * NUM_TASKS * 3);
-        println!("Mutex test OK");
-    }
+    guard.lock
 }
diff --git a/modules/axsync/src/mutex/tests.rs b/modules/axsync/src/mutex/tests.rs
new file mode 100644
index 0000000000..8eb8043774
--- /dev/null
+++ b/modules/axsync/src/mutex/tests.rs
@@ -0,0 +1,179 @@
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::Arc;
+
+use axtask as thread;
+
+use crate::{Condvar, Mutex};
+
+#[derive(Eq, PartialEq, Debug)]
+struct NonCopy(i32);
+
+struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
+
+fn may_interrupt() {
+    // simulate interrupts
+    if rand::random::<u32>() % 3 == 0 {
+        thread::yield_now();
+    }
+}
+
+#[test]
+fn smoke() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let m = Mutex::new(());
+    drop(m.lock());
+    drop(m.lock());
+}
+
+#[test]
+fn lots_and_lots() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    const NUM_TASKS: u32 = 10;
+    const NUM_ITERS: u32 = 10_000;
+    static M: Mutex<u32> = Mutex::new(0);
+
+    fn inc(delta: u32) {
+        for _ in 0..NUM_ITERS {
+            let mut val = M.lock();
+            *val += delta;
+            may_interrupt();
+            drop(val);
+            may_interrupt();
+        }
+    }
+
+    for _ in 0..NUM_TASKS {
+        thread::spawn(|| inc(1));
+        thread::spawn(|| inc(2));
+    }
+
+    println!("spawn OK");
+    loop {
+        let val = M.lock();
+        if *val == NUM_ITERS * NUM_TASKS * 3 {
+            break;
+        }
+        may_interrupt();
+        drop(val);
+        may_interrupt();
+    }
+
+    assert_eq!(*M.lock(), NUM_ITERS * NUM_TASKS * 3);
+    println!("Mutex test OK");
+}
+
+#[test]
+fn try_lock() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let m = Mutex::new(());
+    *m.try_lock().unwrap() = ();
+}
+
+#[test]
+fn test_into_inner() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let m = Mutex::new(NonCopy(10));
+    assert_eq!(m.into_inner(), NonCopy(10));
+}
+
+#[test]
+fn test_into_inner_drop() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    struct Foo(Arc<AtomicUsize>);
+    impl Drop for Foo {
+        fn drop(&mut self) {
+            self.0.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+    let num_drops = Arc::new(AtomicUsize::new(0));
+    let m = Mutex::new(Foo(num_drops.clone()));
+    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+    {
+        let _inner = m.into_inner();
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+    }
+    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+}
+
+#[test]
+fn test_get_mut() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let mut m = Mutex::new(NonCopy(10));
+    *m.get_mut() = NonCopy(20);
+    assert_eq!(m.into_inner(), NonCopy(20));
+}
+
+#[test]
+fn test_mutex_arc_condvar() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
+    let packet2 = Packet(packet.0.clone());
+    let (tx, rx) = channel();
+    let _t = thread::spawn(move || {
+        // wait until parent gets in
+        rx.recv().unwrap();
+        let &(ref lock, ref cvar) = &*packet2.0;
+        let mut lock = lock.lock();
+        *lock = true;
+        cvar.notify_one();
+    });
+
+    let &(ref lock, ref cvar) = &*packet.0;
+    let mut lock = lock.lock();
+    tx.send(()).unwrap();
+    assert!(!*lock);
+    while !*lock {
+        lock = cvar.wait(lock);
+    }
+}
+
+#[test]
+fn test_mutex_arc_nested() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    // Tests nested mutexes and access
+    // to underlying data.
+    let arc = Arc::new(Mutex::new(1));
+    let arc2 = Arc::new(Mutex::new(arc));
+    let (tx, rx) = channel();
+    let _t = thread::spawn(move || {
+        let lock = arc2.lock();
+        let lock2 = lock.lock();
+        assert_eq!(*lock2, 1);
+        tx.send(()).unwrap();
+    });
+    // Yield manually to get tx.send() executed.
+ thread::yield_now(); + rx.recv().unwrap(); +} + +#[test] +fn test_mutex_unsized() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); + { + let b = &mut *mutex.lock(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*mutex.lock(), comp); +} diff --git a/modules/axsync/src/rwlock/mod.rs b/modules/axsync/src/rwlock/mod.rs index 15c3294aa9..c20d0a3486 100644 --- a/modules/axsync/src/rwlock/mod.rs +++ b/modules/axsync/src/rwlock/mod.rs @@ -20,6 +20,9 @@ mod no_thread; #[cfg(not(feature = "multitask"))] use no_thread as sys; +#[cfg(test)] +mod tests; + /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any @@ -508,7 +511,7 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { let orig = ManuallyDrop::new(orig); MappedRwLockReadGuard { data, - inner_lock: &orig.inner_lock, + inner_lock: orig.inner_lock, } } @@ -542,7 +545,7 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> { let orig = ManuallyDrop::new(orig); Ok(MappedRwLockReadGuard { data, - inner_lock: &orig.inner_lock, + inner_lock: orig.inner_lock, }) } None => Err(orig), @@ -577,7 +580,7 @@ impl<'a, T: ?Sized> MappedRwLockReadGuard<'a, T> { let orig = ManuallyDrop::new(orig); MappedRwLockReadGuard { data, - inner_lock: &orig.inner_lock, + inner_lock: orig.inner_lock, } } @@ -611,7 +614,7 @@ impl<'a, T: ?Sized> MappedRwLockReadGuard<'a, T> { let orig = ManuallyDrop::new(orig); Ok(MappedRwLockReadGuard { data, - inner_lock: &orig.inner_lock, + inner_lock: orig.inner_lock, }) } None => Err(orig), diff --git a/modules/axsync/src/rwlock/multitask.rs b/modules/axsync/src/rwlock/multitask.rs index 803458dc85..b80489908d 100644 --- a/modules/axsync/src/rwlock/multitask.rs +++ b/modules/axsync/src/rwlock/multitask.rs @@ -1,5 +1,4 @@ /// A multi-tasking-friendly RwLock implementation. - use core::sync::atomic::AtomicU32; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; @@ -297,14 +296,13 @@ impl RwLock { } // If readers are waiting, wake them all up. 
-        if state == READERS_WAITING {
-            if self
+        if state == READERS_WAITING
+            && self
                 .state
                 .compare_exchange(state, 0, Relaxed, Relaxed)
                 .is_ok()
-            {
-                self.state_wq.notify_all(true);
-            }
+        {
+            self.state_wq.notify_all(true);
         }
     }
diff --git a/modules/axsync/src/rwlock/tests.rs b/modules/axsync/src/rwlock/tests.rs
new file mode 100644
index 0000000000..a62d4b6115
--- /dev/null
+++ b/modules/axsync/src/rwlock/tests.rs
@@ -0,0 +1,236 @@
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::mpsc::channel;
+use std::sync::Arc;
+
+use axtask as thread;
+
+use crate::{
+    MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
+};
+
+const NUM_TASKS: u32 = 10;
+const NUM_ITERS: u32 = 10_000;
+
+#[derive(Eq, PartialEq, Debug)]
+struct NonCopy(i32);
+
+#[test]
+fn smoke() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let l = RwLock::new(());
+    drop(l.read());
+    drop(l.write());
+    drop((l.read(), l.read()));
+    drop(l.write());
+}
+
+fn random_bool() -> bool {
+    rand::random::<u32>() % 2 == 0
+}
+
+#[test]
+fn frob() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let r = Arc::new(RwLock::new(()));
+
+    let (tx, rx) = channel::<()>();
+    for _ in 0..NUM_TASKS {
+        let tx = tx.clone();
+        let r = r.clone();
+        thread::spawn(move || {
+            for _ in 0..NUM_ITERS {
+                if random_bool() {
+                    drop(r.write());
+                } else {
+                    drop(r.read());
+                }
+            }
+            drop(tx);
+        });
+    }
+    drop(tx);
+    // Yield so the spawned tasks run and drop their channel senders.
+    thread::yield_now();
+    let _ = rx.recv();
+}
+
+#[test]
+fn test_rw_arc() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let arc = Arc::new(RwLock::new(0));
+    let arc2 = arc.clone();
+    let (tx, rx) = channel();
+
+    thread::spawn(move || {
+        let mut lock = arc2.write();
+        for _ in 0..10 {
+            let tmp = *lock;
+            *lock = -1;
+            thread::yield_now();
+            *lock = tmp + 1;
+        }
+        tx.send(()).unwrap();
+    });
+
+    // Readers try to catch the writer in the act
+    let mut children = Vec::new();
+    for _ in 0..5 {
+        let arc3 = arc.clone();
+        children.push(thread::spawn(move || {
+            let lock = arc3.read();
+            assert!(*lock >= 0);
+        }));
+    }
+
+    // Wait for children to pass their asserts
+    for r in children {
+        assert_eq!(r.join().unwrap(), 0);
+    }
+
+    // Wait for writer to finish
+    rx.recv().unwrap();
+    let lock = arc.read();
+    assert_eq!(*lock, 10);
+}
+
+#[test]
+fn test_rwlock_unsized() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+    {
+        let b = &mut *rw.write();
+        b[0] = 4;
+        b[2] = 5;
+    }
+    let comp: &[i32] = &[4, 2, 5];
+    assert_eq!(&*rw.read(), comp);
+}
+
+#[test]
+fn test_rwlock_try_write() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let lock = RwLock::new(0isize);
+    let read_guard = lock.read();
+
+    let write_result = lock.try_write();
+    assert!(
+        write_result.is_none(),
+        "try_write should not succeed while read_guard is in scope"
+    );
+
+    drop(read_guard);
+    let mapped_read_guard = RwLockReadGuard::map(lock.read(), |_| &());
+
+    let write_result = lock.try_write();
+    assert!(
+        write_result.is_none(),
+        "try_write should not succeed while mapped_read_guard is in scope"
+    );
+
+    drop(mapped_read_guard);
+}
+
+#[test]
+fn test_into_inner() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let m = RwLock::new(NonCopy(10));
+    assert_eq!(m.into_inner(), NonCopy(10));
+}
+
+#[test]
+fn test_into_inner_drop() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    struct Foo(Arc<AtomicUsize>);
+    impl Drop for Foo {
+        fn drop(&mut self) {
+            self.0.fetch_add(1, Ordering::SeqCst);
+        }
+    }
+    let num_drops = Arc::new(AtomicUsize::new(0));
+    let m = RwLock::new(Foo(num_drops.clone()));
+    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+    {
+        let _inner = m.into_inner();
+        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+    }
+    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+}
+
+#[test]
+fn test_get_mut() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let mut m = RwLock::new(NonCopy(10));
+    *m.get_mut() = NonCopy(20);
+    assert_eq!(m.into_inner(), NonCopy(20));
+}
+
+#[test]
+fn test_read_guard_covariance() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+    let j: i32 = 5;
+    let lock = RwLock::new(&j);
+    {
+        let i = 6;
+        do_stuff(lock.read(), &i);
+    }
+    drop(lock);
+}
+
+#[test]
+fn test_mapped_read_guard_covariance() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+    let j: i32 = 5;
+    let lock = RwLock::new((&j, &j));
+    {
+        let i = 6;
+        let guard = lock.read();
+        let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
+        do_stuff(guard, &i);
+    }
+    drop(lock);
+}
+
+#[test]
+fn test_mapping_mapped_guard() {
+    let _lock = crate::tests::SEQ.lock();
+    crate::tests::INIT.call_once(thread::init_scheduler);
+
+    let arr = [0; 4];
+    let mut lock = RwLock::new(arr);
+    let guard = lock.write();
+    let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
+    let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
+    assert_eq!(guard.len(), 1);
+    guard[0] = 42;
+    drop(guard);
+    assert_eq!(*lock.get_mut(), [0, 42, 0, 0]);
+
+    let guard = lock.read();
+    let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
+    let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
+    assert_eq!(*guard, [42]);
+    drop(guard);
+    assert_eq!(*lock.get_mut(), [0, 42, 0, 0]);
+}
diff --git a/modules/axsync/src/semaphore.rs b/modules/axsync/src/semaphore.rs
index ac5bbecfb6..a2593176c2 100644
--- a/modules/axsync/src/semaphore.rs
+++ b/modules/axsync/src/semaphore.rs
@@ -6,6 +6,9 @@
 //!
 //! Note: [`Semaphore`] is not available when the `multitask` feature is disabled.
 
+#[cfg(test)]
+mod tests;
+
 use crate::{Condvar, Mutex};
 
 /// A counting, blocking, semaphore.
@@ -77,122 +80,3 @@ impl<'a> Drop for SemaphoreGuard<'a> { self.sem.release(); } } - -#[cfg(test)] -mod tests { - use std::sync::mpsc::channel; - use std::sync::Arc; - use std::sync::Once; - - use axtask as thread; - - use crate::Semaphore; - - static INIT: Once = Once::new(); - - #[test] - fn test_sem_acquire_release() { - INIT.call_once(thread::init_scheduler); - - let s = Semaphore::new(1); - s.acquire(); - s.release(); - s.acquire(); - } - - #[test] - fn test_sem_basic() { - INIT.call_once(thread::init_scheduler); - - let s = Semaphore::new(1); - let _g = s.access(); - } - - #[test] - fn test_sem_as_mutex() { - INIT.call_once(thread::init_scheduler); - - let s = Arc::new(Semaphore::new(1)); - let s2 = s.clone(); - let _t = thread::spawn(move || { - let _g = s2.access(); - }); - let _g = s.access(); - } - - #[test] - fn test_sem_as_cvar() { - INIT.call_once(thread::init_scheduler); - - // Child waits and parent signals - let (tx, rx) = channel(); - let s = Arc::new(Semaphore::new(0)); - let s2 = s.clone(); - let _t = thread::spawn(move || { - s2.acquire(); - tx.send(()).unwrap(); - }); - s.release(); - thread::yield_now(); - let _ = rx.recv(); - - // Parent waits and child signals - let (tx, rx) = channel(); - let s = Arc::new(Semaphore::new(0)); - let s2 = s.clone(); - let _t = thread::spawn(move || { - s2.release(); - thread::yield_now(); - let _ = rx.recv(); - }); - s.acquire(); - tx.send(()).unwrap(); - thread::yield_now(); - } - - #[test] - fn test_sem_multi_resource() { - INIT.call_once(thread::init_scheduler); - - // Parent and child both get in the critical section at the same - // time, and shake hands. - let s = Arc::new(Semaphore::new(2)); - let s2 = s.clone(); - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - let _t = thread::spawn(move || { - let _g = s2.access(); - thread::yield_now(); - let _ = rx2.recv(); - tx1.send(()).unwrap(); - }); - let _g = s.access(); - thread::yield_now(); - tx2.send(()).unwrap(); - thread::yield_now(); - rx1.recv().unwrap(); - } - - #[test] - fn test_sem_runtime_friendly_blocking() { - INIT.call_once(thread::init_scheduler); - - let s = Arc::new(Semaphore::new(1)); - let s2 = s.clone(); - let (tx, rx) = channel(); - { - let _g = s.access(); - thread::spawn(move || { - tx.send(()).unwrap(); - thread::yield_now(); - drop(s2.access()); - tx.send(()).unwrap(); - thread::yield_now(); - }); - thread::yield_now(); - rx.recv().unwrap(); // wait for child to come alive - } - thread::yield_now(); - rx.recv().unwrap(); // wait for child to be done - } -} diff --git a/modules/axsync/src/semaphore/tests.rs b/modules/axsync/src/semaphore/tests.rs new file mode 100644 index 0000000000..88ad5624e6 --- /dev/null +++ b/modules/axsync/src/semaphore/tests.rs @@ -0,0 +1,118 @@ +use std::sync::mpsc::channel; +use std::sync::Arc; + +use axtask as thread; + +use crate::Semaphore; + +#[test] +fn test_sem_acquire_release() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + let s = Semaphore::new(1); + s.acquire(); + s.release(); + s.acquire(); +} + +#[test] +fn test_sem_basic() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + let s = Semaphore::new(1); + let _g = s.access(); +} + +#[test] +fn test_sem_as_mutex() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + let s = Arc::new(Semaphore::new(1)); + let s2 = s.clone(); + let _t = thread::spawn(move || { + let _g = s2.access(); + }); + let _g = s.access(); 
+} + +#[test] +fn test_sem_as_cvar() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + // Child waits and parent signals + let (tx, rx) = channel(); + let s = Arc::new(Semaphore::new(0)); + let s2 = s.clone(); + let _t = thread::spawn(move || { + s2.acquire(); + tx.send(()).unwrap(); + }); + s.release(); + thread::yield_now(); + let _ = rx.recv(); + + // Parent waits and child signals + let (tx, rx) = channel(); + let s = Arc::new(Semaphore::new(0)); + let s2 = s.clone(); + let _t = thread::spawn(move || { + s2.release(); + thread::yield_now(); + let _ = rx.recv(); + }); + s.acquire(); + tx.send(()).unwrap(); + thread::yield_now(); +} + +#[test] +fn test_sem_multi_resource() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + // Parent and child both get in the critical section at the same + // time, and shake hands. + let s = Arc::new(Semaphore::new(2)); + let s2 = s.clone(); + let (tx1, rx1) = channel(); + let (tx2, rx2) = channel(); + let _t = thread::spawn(move || { + let _g = s2.access(); + thread::yield_now(); + let _ = rx2.recv(); + tx1.send(()).unwrap(); + }); + let _g = s.access(); + thread::yield_now(); + tx2.send(()).unwrap(); + thread::yield_now(); + rx1.recv().unwrap(); +} + +#[test] +fn test_sem_runtime_friendly_blocking() { + let _lock = crate::tests::SEQ.lock(); + crate::tests::INIT.call_once(thread::init_scheduler); + + let s = Arc::new(Semaphore::new(1)); + let s2 = s.clone(); + let (tx, rx) = channel(); + { + let _g = s.access(); + thread::spawn(move || { + tx.send(()).unwrap(); + thread::yield_now(); + drop(s2.access()); + tx.send(()).unwrap(); + thread::yield_now(); + }); + thread::yield_now(); + rx.recv().unwrap(); // wait for child to come alive + } + thread::yield_now(); + rx.recv().unwrap(); // wait for child to be done +} diff --git a/modules/axtask/src/tests.rs b/modules/axtask/src/tests.rs index 47a85e83ed..97bcdbb077 100644 --- a/modules/axtask/src/tests.rs +++ b/modules/axtask/src/tests.rs @@ -4,11 +4,11 @@ use std::sync::{Mutex, Once}; use crate::{api as axtask, current, WaitQueue}; static INIT: Once = Once::new(); -static SERIAL: Mutex<()> = Mutex::new(()); +static SEQ: Mutex<()> = Mutex::new(()); #[test] fn test_sched_fifo() { - let _lock = SERIAL.lock(); + let _lock = SEQ.lock(); INIT.call_once(axtask::init_scheduler); const NUM_TASKS: usize = 10; @@ -34,7 +34,7 @@ fn test_sched_fifo() { #[test] fn test_fp_state_switch() { - let _lock = SERIAL.lock(); + let _lock = SEQ.lock(); INIT.call_once(axtask::init_scheduler); const NUM_TASKS: usize = 5; @@ -65,7 +65,7 @@ fn test_fp_state_switch() { #[test] fn test_wait_queue() { - let _lock = SERIAL.lock(); + let _lock = SEQ.lock(); INIT.call_once(axtask::init_scheduler); const NUM_TASKS: usize = 10; @@ -106,7 +106,7 @@ fn test_wait_queue() { #[test] fn test_task_join() { - let _lock = SERIAL.lock(); + let _lock = SEQ.lock(); INIT.call_once(axtask::init_scheduler); const NUM_TASKS: usize = 10;
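
Note (not part of the diff): every relocated test above opens with the same two-step harness introduced in `axsync/src/lib.rs` — take the crate-wide `SEQ` mutex so tests sharing the one global scheduler never interleave, then `INIT.call_once(...)` so the scheduler is brought up exactly once. The following is a minimal, self-contained sketch of that pattern; `init_scheduler` and `run_serialized_test` here are hypothetical stand-ins (the real code uses `axtask::init_scheduler` and per-test boilerplate), not the PR's API.

```rust
use std::sync::{Mutex, Once};

// One-time global setup, mirroring `crate::tests::INIT` in the diff.
static INIT: Once = Once::new();
// Serializes tests that touch shared global state, mirroring `crate::tests::SEQ`.
static SEQ: Mutex<()> = Mutex::new(());

// Hypothetical stand-in for `axtask::init_scheduler`.
fn init_scheduler() {
    println!("scheduler initialized (exactly once)");
}

// Hypothetical helper; each real test inlines these two lines instead.
fn run_serialized_test(name: &str, body: impl FnOnce()) {
    // Hold the guard for the whole test body: even if the test runner
    // executes tests concurrently, only one test enters at a time.
    let _guard = SEQ.lock().unwrap();
    // `call_once` runs its closure on the first call only; later calls no-op.
    INIT.call_once(init_scheduler);
    body();
    println!("{name} OK");
}

fn main() {
    run_serialized_test("first", || assert_eq!(2 + 2, 4));
    run_serialized_test("second", || assert!([1, 2, 3].contains(&2)));
}
```

One detail the diff relies on: the guard is bound as `let _lock = ...`, not `let _ = ...`. A plain `_` pattern would drop the guard immediately and provide no serialization at all.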