Change to a simpler mutex API

pull/84/head
Andre Richter 4 years ago
parent 02f9f34af3
commit 5c193d5803

@ -137,7 +137,7 @@ diff -uNr 04_zero_overhead_abstraction/src/bsp/raspberrypi/console.rs 05_safe_gl
}
Ok(())
@@ -41,7 +80,39 @@
@@ -41,7 +80,37 @@
// Public Code
//--------------------------------------------------------------------------------------------------
@ -168,15 +168,13 @@ diff -uNr 04_zero_overhead_abstraction/src/bsp/raspberrypi/console.rs 05_safe_gl
+ fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
+        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
+ // readability.
+ let mut r = &self.inner;
+ r.lock(|inner| fmt::Write::write_fmt(inner, args))
+ self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
+ }
+}
+
+impl console::interface::Statistics for QEMUOutput {
+ fn chars_written(&self) -> usize {
+ let mut r = &self.inner;
+ r.lock(|inner| inner.chars_written)
+ self.inner.lock(|inner| inner.chars_written)
+ }
}
@ -252,12 +250,17 @@ diff -uNr 04_zero_overhead_abstraction/src/main.rs 05_safe_globals/src/main.rs
diff -uNr 04_zero_overhead_abstraction/src/synchronization.rs 05_safe_globals/src/synchronization.rs
--- 04_zero_overhead_abstraction/src/synchronization.rs
+++ 05_safe_globals/src/synchronization.rs
@@ -0,0 +1,92 @@
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
+
+//! Synchronization primitives.
+//!
+//! Suggested literature:
+//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
+//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
+//! - https://doc.rust-lang.org/std/cell/index.html
+
+use core::cell::UnsafeCell;
+
@ -268,50 +271,28 @@ diff -uNr 04_zero_overhead_abstraction/src/synchronization.rs 05_safe_globals/sr
+/// Synchronization interfaces.
+pub mod interface {
+
+ /// Any object implementing this trait guarantees exclusive access to the data contained within
+ /// Any object implementing this trait guarantees exclusive access to the data wrapped within
+ /// the Mutex for the duration of the provided closure.
+ ///
+ /// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
+ /// such as [deadlock prevention].
+ ///
+ /// # Example
+ ///
+ /// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
+ /// best implemented **for a reference to a container struct**, and has a usage pattern that
+ /// might feel strange at first:
+ ///
+ /// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
+ /// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
+ ///
+ /// ```
+ /// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
+ ///
+ /// fn foo() {
+ /// let mut r = &MUT; // Note that r is mutable
+ /// r.lock(|data| *data += 1);
+ /// }
+ /// ```
+ pub trait Mutex {
+ /// The type of encapsulated data.
+ /// The type of the data that is wrapped by this mutex.
+ type Data;
+
+ /// Creates a critical section and grants temporary mutable access to the encapsulated data.
+ fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
+ /// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
+ fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
+ }
+}
+
+/// A pseudo-lock for teaching purposes.
+///
+/// Used to introduce [interior mutability].
+///
+/// In contrast to a real Mutex implementation, does not protect against concurrent access from
+/// other cores to the contained data. This part is preserved for later lessons.
+///
+/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
+/// executing single-threaded, aka only running on a single core with interrupts disabled.
+///
+/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
+pub struct NullLock<T: ?Sized> {
+pub struct NullLock<T>
+where
+ T: ?Sized,
+{
+ data: UnsafeCell<T>,
+}
+
@ -319,10 +300,11 @@ diff -uNr 04_zero_overhead_abstraction/src/synchronization.rs 05_safe_globals/sr
+// Public Code
+//--------------------------------------------------------------------------------------------------
+
+unsafe impl<T: ?Sized> Sync for NullLock<T> {}
+unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
+unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
+
+impl<T> NullLock<T> {
+ /// Wraps `data` into a new `NullLock`.
+ /// Create an instance.
+ pub const fn new(data: T) -> Self {
+ Self {
+ data: UnsafeCell::new(data),
@ -334,10 +316,10 @@ diff -uNr 04_zero_overhead_abstraction/src/synchronization.rs 05_safe_globals/sr
+// OS Interface Code
+//------------------------------------------------------------------------------
+
+impl<T> interface::Mutex for &NullLock<T> {
+impl<T> interface::Mutex for NullLock<T> {
+ type Data = T;
+
+ fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
+ fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
+ // In a real lock, there would be code encapsulating this line that ensures that this
+ // mutable reference will ever only be given out once at a time.
+ let data = unsafe { &mut *self.data.get() };
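
To see what the simpler API amounts to end to end, here is a condensed, host-runnable sketch of the new `&self`-based `lock()`. The trait and `NullLock` are taken from the diff above; the `COUNTER` static and `main()` are made up for illustration, and the real kernel is `no_std`, so it has no `println!`:

use core::cell::UnsafeCell;

pub trait Mutex {
    type Data;
    fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}

pub struct NullLock<T>
where
    T: ?Sized,
{
    data: UnsafeCell<T>,
}

unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}

impl<T> NullLock<T> {
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
        }
    }
}

impl<T> Mutex for NullLock<T> {
    type Data = T;

    fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
        // Single-core, interrupts-disabled assumption, exactly as in the tutorial text.
        let data = unsafe { &mut *self.data.get() };
        f(data)
    }
}

// Hypothetical static for illustration; the kernel wraps driver state this way.
static COUNTER: NullLock<i32> = NullLock::new(0);

fn main() {
    // Old API: `let mut r = &COUNTER; r.lock(|c| *c += 1);`
    // New API: lock directly through a shared reference.
    COUNTER.lock(|c| *c += 1);
    println!("counter = {}", COUNTER.lock(|c| *c));
}

Because `lock()` now takes `&self` and is implemented for `NullLock<T>` itself rather than for `&NullLock<T>`, the `let mut r = &...` detour disappears at every call site, which is exactly what the driver diffs below show.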

@ -105,14 +105,12 @@ impl console::interface::Write for QEMUOutput {
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
}
impl console::interface::Statistics for QEMUOutput {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
}
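
The same change seen from a driver's point of view: the wrapper struct owns a `NullLock<Inner>` and every trait method simply locks it. A minimal sketch, reusing the `Mutex` and `NullLock` definitions from the sketch above; this `QEMUOutputInner` is a stand-in that only counts characters, while the real one also emits them to the QEMU console:

use core::fmt;

struct QEMUOutputInner {
    chars_written: usize,
}

impl fmt::Write for QEMUOutputInner {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        // Stand-in: just keep the statistics up to date.
        self.chars_written += s.chars().count();
        Ok(())
    }
}

struct QEMUOutput {
    inner: NullLock<QEMUOutputInner>,
}

impl QEMUOutput {
    const fn new() -> Self {
        Self {
            inner: NullLock::new(QEMUOutputInner { chars_written: 0 }),
        }
    }

    fn write_fmt(&self, args: fmt::Arguments) -> fmt::Result {
        // `&self` is enough now; no `let mut r = &self.inner;` binding needed.
        self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
    }

    fn chars_written(&self) -> usize {
        self.inner.lock(|inner| inner.chars_written)
    }
}

// Usage, e.g. from a `print!` macro implementation:
//     static QEMU_OUTPUT: QEMUOutput = QEMUOutput::new();
//     QEMU_OUTPUT.write_fmt(format_args!("hello\n")).unwrap();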

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -205,7 +205,7 @@ diff -uNr 05_safe_globals/src/_arch/aarch64/cpu.rs 06_drivers_gpio_uart/src/_arc
diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs
--- 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs
+++ 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs
@@ -0,0 +1,222 @@
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2018-2020 Andre Richter <andre.o.richter@gmail.com>
@ -413,8 +413,7 @@ diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs 06_drivers_g
+
+ /// Concurrency safe version of `GPIOInner.map_pl011_uart()`
+ pub fn map_pl011_uart(&self) {
+ let mut r = &self.inner;
+ r.lock(|inner| inner.map_pl011_uart())
+ self.inner.lock(|inner| inner.map_pl011_uart())
+ }
+}
+
@ -432,7 +431,7 @@ diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs 06_drivers_g
diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
--- 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
+++ 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
@@ -0,0 +1,311 @@
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2018-2020 Andre Richter <andre.o.richter@gmail.com>
@ -685,8 +684,7 @@ diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 06_dri
+ }
+
+ unsafe fn init(&self) -> Result<(), &'static str> {
+ let mut r = &self.inner;
+ r.lock(|inner| inner.init());
+ self.inner.lock(|inner| inner.init());
+
+ Ok(())
+ }
@ -696,22 +694,19 @@ diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 06_dri
+ /// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
+ /// serialize access.
+ fn write_char(&self, c: char) {
+ let mut r = &self.inner;
+ r.lock(|inner| inner.write_char(c));
+ self.inner.lock(|inner| inner.write_char(c));
+ }
+
+ fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
+        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
+ // readability.
+ let mut r = &self.inner;
+ r.lock(|inner| fmt::Write::write_fmt(inner, args))
+ self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
+ }
+}
+
+impl console::interface::Read for PL011Uart {
+ fn read_char(&self) -> char {
+ let mut r = &self.inner;
+ r.lock(|inner| {
+ self.inner.lock(|inner| {
+ // Spin while RX FIFO empty is set.
+ while inner.registers.FR.matches_all(FR::RXFE::SET) {
+ cpu::nop();
@ -735,13 +730,11 @@ diff -uNr 05_safe_globals/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 06_dri
+
+impl console::interface::Statistics for PL011Uart {
+ fn chars_written(&self) -> usize {
+ let mut r = &self.inner;
+ r.lock(|inner| inner.chars_written)
+ self.inner.lock(|inner| inner.chars_written)
+ }
+
+ fn chars_read(&self) -> usize {
+ let mut r = &self.inner;
+ r.lock(|inner| inner.chars_read)
+ self.inner.lock(|inner| inner.chars_read)
+ }
+}
@ -824,7 +817,7 @@ diff -uNr 05_safe_globals/src/bsp/device_driver.rs 06_drivers_gpio_uart/src/bsp/
diff -uNr 05_safe_globals/src/bsp/raspberrypi/console.rs 06_drivers_gpio_uart/src/bsp/raspberrypi/console.rs
--- 05_safe_globals/src/bsp/raspberrypi/console.rs
+++ 06_drivers_gpio_uart/src/bsp/raspberrypi/console.rs
@@ -4,115 +4,34 @@
@@ -4,113 +4,34 @@
//! BSP console facilities.
@ -946,15 +939,13 @@ diff -uNr 05_safe_globals/src/bsp/raspberrypi/console.rs 06_drivers_gpio_uart/sr
- fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
-        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
- // readability.
- let mut r = &self.inner;
- r.lock(|inner| fmt::Write::write_fmt(inner, args))
- self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
- }
-}
-
-impl console::interface::Statistics for QEMUOutput {
- fn chars_written(&self) -> usize {
- let mut r = &self.inner;
- r.lock(|inner| inner.chars_written)
- self.inner.lock(|inner| inner.chars_written)
- }
+ &super::PL011_UART
}

@ -205,8 +205,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,22 +260,19 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
}
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -300,12 +296,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -218,15 +218,14 @@ diff -uNr 06_drivers_gpio_uart/src/_arch/aarch64/cpu.rs 07_uart_chainloader/src/
diff -uNr 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 07_uart_chainloader/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
--- 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
+++ 07_uart_chainloader/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
@@ -271,6 +271,16 @@
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
@@ -268,6 +268,15 @@
// readability.
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
+
+ fn flush(&self) {
+ // Spin until TX FIFO empty is set.
+ let mut r = &self.inner;
+ r.lock(|inner| {
+ self.inner.lock(|inner| {
+ while !inner.registers.FR.matches_all(FR::TXFE::SET) {
+ cpu::nop();
+ }
@ -235,7 +234,7 @@ diff -uNr 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 0
}
impl console::interface::Read for PL011Uart {
@@ -282,18 +292,21 @@
@@ -278,18 +287,20 @@
cpu::nop();
}
@ -257,8 +256,7 @@ diff -uNr 06_drivers_gpio_uart/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 0
+ }
+
+ fn clear(&self) {
+ let mut r = &self.inner;
+ r.lock(|inner| {
+ self.inner.lock(|inner| {
+ // Read from the RX FIFO until it is indicating empty.
+ while !inner.registers.FR.matches_all(FR::RXFE::SET) {
+ inner.registers.DR.get();

@ -205,8 +205,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -301,8 +296,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -313,12 +307,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -309,7 +309,7 @@ diff -uNr 07_uart_chainloader/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs 08_times
diff -uNr 07_uart_chainloader/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs 08_timestamps/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
--- 07_uart_chainloader/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
+++ 08_timestamps/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
@@ -292,11 +292,18 @@
@@ -287,11 +287,18 @@
cpu::nop();
}

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -294,7 +294,7 @@ diff -uNr 09_hw_debug_JTAG/src/_arch/aarch64/cpu.rs 10_privilege_level/src/_arch
diff -uNr 09_hw_debug_JTAG/src/_arch/aarch64/exception/asynchronous.rs 10_privilege_level/src/_arch/aarch64/exception/asynchronous.rs
--- 09_hw_debug_JTAG/src/_arch/aarch64/exception/asynchronous.rs
+++ 10_privilege_level/src/_arch/aarch64/exception/asynchronous.rs
@@ -0,0 +1,71 @@
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2018-2020 Andre Richter <andre.o.richter@gmail.com>
@ -344,7 +344,10 @@ diff -uNr 09_hw_debug_JTAG/src/_arch/aarch64/exception/asynchronous.rs 10_privil
+ }
+}
+
+fn is_masked<T: DaifField>() -> bool {
+fn is_masked<T>() -> bool
+where
+ T: DaifField,
+{
+ DAIF.is_set(T::daif_field())
+}
+

@ -47,7 +47,10 @@ impl DaifField for FIQ {
}
}
fn is_masked<T: DaifField>() -> bool {
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -47,7 +47,10 @@ impl DaifField for FIQ {
}
}
fn is_masked<T: DaifField>() -> bool {
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -47,7 +47,10 @@ impl DaifField for FIQ {
}
}
fn is_masked<T: DaifField>() -> bool {
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };
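For orientation, this is what call sites look like with the simplified trait. The following is an illustrative sketch only, not part of the commit; the module path and the `COUNTER` static are assumptions. Because `Mutex` is now implemented for `NullLock<T>` itself and `lock()` takes `&self`, the `let mut r = &...` indirection at the call site disappears:

```rust
use crate::synchronization::{interface::Mutex, NullLock};

// Hypothetical wrapped data, for demonstration only.
static COUNTER: NullLock<i32> = NullLock::new(0);

fn increment() -> i32 {
    // Lock through a shared reference; the closure receives mutable access to the data.
    COUNTER.lock(|data| {
        *data += 1;
        *data
    })
}
```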

@ -47,7 +47,10 @@ impl DaifField for FIQ {
}
}
fn is_masked<T: DaifField>() -> bool {
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
// Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };

@ -240,8 +240,7 @@ this tutorial, the `RX IRQ` and the `RX Timeout IRQ` will be configured. This me
```rust
impl exception::asynchronous::interface::IRQHandler for PL011Uart {
fn handle(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
let pending = inner.registers.MIS.extract();
// Clear all pending IRQs.
@ -401,10 +400,10 @@ access so far, we also want it protected against reentrancy now. Therefore, we u
`NullLocks` to `IRQSafeNullLocks`:
```rust
impl<T> interface::Mutex for &IRQSafeNullLock<T> {
impl<T> interface::Mutex for IRQSafeNullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };
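The excerpt above only shows how the `lock()` signature changes for `IRQSafeNullLock`; the code that actually provides the reentrancy protection is not visible in this hunk. As a rough sketch of the underlying idea only (the two helper functions are assumed stand-ins for the arch-specific primitives, not taken from this commit):

```rust
use core::cell::UnsafeCell;

// Assumed stand-in: mask IRQs on the executing core and return the previous mask state.
fn local_irq_mask_save() -> u64 {
    0
}

// Assumed stand-in: restore the previously saved mask state.
fn local_irq_restore(_saved: u64) {}

// The closure runs with IRQs masked, so an interrupt handler on the same core can never
// re-enter the critical section and observe the wrapped data mid-update.
fn lock_irq_safe<T, R>(data: &UnsafeCell<T>, f: impl FnOnce(&mut T) -> R) -> R {
    let saved = local_irq_mask_save();
    let ret = f(unsafe { &mut *data.get() });
    local_irq_restore(saved);

    ret
}
```

Restoring the saved mask state, rather than unconditionally unmasking, keeps nested critical sections well-behaved.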
@ -519,10 +518,10 @@ time"_. For the `InitStateLock`, we only implement the `read()` and `write()` fu
[are characterized]: https://doc.rust-lang.org/std/sync/struct.RwLock.html
```rust
impl<T> interface::ReadWriteEx for &InitStateLock<T> {
impl<T> interface::ReadWriteEx for InitStateLock<T> {
type Data = T;
fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
assert!(
state::state_manager().is_init(),
"InitStateLock::write called after kernel init phase"
@ -537,7 +536,7 @@ impl<T> interface::ReadWriteEx for &InitStateLock<T> {
f(data)
}
fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R {
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
let data = unsafe { &*self.data.get() };
f(data)
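To make the intended calling convention of `ReadWriteEx` concrete, here is a small usage sketch (illustrative only; the static and the function names are assumptions): data wrapped in an `InitStateLock` is written exclusively during the single-core kernel init phase and only read afterwards.

```rust
use crate::synchronization::{interface::ReadWriteEx, InitStateLock};

// Hypothetical registry that is populated once during kernel init.
static DRIVER_NAMES: InitStateLock<[Option<&'static str>; 4]> = InitStateLock::new([None; 4]);

/// Must only be called during kernel init; `write()` asserts that the init phase is active.
fn register_driver(slot: usize, name: &'static str) {
    DRIVER_NAMES.write(|table| table[slot] = Some(name));
}

/// Safe to call at any time, e.g. from the IRQ path, because it never mutates the data.
fn registered_count() -> usize {
    DRIVER_NAMES.read(|table| table.iter().filter(|slot| slot.is_some()).count())
}
```

The `write()` path carries the `state_manager().is_init()` assertion shown above, so a write after the init phase panics loudly instead of racing silently.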
@ -772,7 +771,7 @@ diff -uNr 13_integrated_testing/src/_arch/aarch64/exception/asynchronous.rs 14_e
trait DaifField {
fn daif_field() -> register::Field<u64, DAIF::Register>;
}
@@ -55,6 +59,71 @@
@@ -58,6 +62,71 @@
// Public Code
//--------------------------------------------------------------------------------------------------
@ -1016,7 +1015,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2/gicc.rs 14_excep
diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2/gicd.rs 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gicd.rs
--- 13_integrated_testing/src/bsp/device_driver/arm/gicv2/gicd.rs
+++ 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gicd.rs
@@ -0,0 +1,197 @@
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
@ -1170,8 +1169,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2/gicd.rs 14_excep
+ // Target all SPIs to the boot core only.
+ let mask = self.local_gic_target_mask();
+
+ let mut r = &self.shared_registers;
+ r.lock(|regs| {
+ self.shared_registers.lock(|regs| {
+ for i in regs.implemented_itargets_slice().iter() {
+ i.write(
+ ITARGETSR::Offset3.val(mask)
@ -1205,8 +1203,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2/gicd.rs 14_excep
+ _ => {
+ let enable_reg_index_shared = enable_reg_index - 1;
+
+ let mut r = &self.shared_registers;
+ r.lock(|regs| {
+ self.shared_registers.lock(|regs| {
+ let enable_reg = &regs.ISENABLER[enable_reg_index_shared];
+ enable_reg.set(enable_reg.get() | enable_bit);
+ });
@ -1218,7 +1215,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2/gicd.rs 14_excep
diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2.rs 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2.rs
--- 13_integrated_testing/src/bsp/device_driver/arm/gicv2.rs
+++ 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2.rs
@@ -0,0 +1,222 @@
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
@ -1379,8 +1376,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2.rs 14_exceptions
+ irq_number: Self::IRQNumberType,
+ descriptor: exception::asynchronous::IRQDescriptor,
+ ) -> Result<(), &'static str> {
+ let mut r = &self.handler_table;
+ r.write(|table| {
+ self.handler_table.write(|table| {
+ let irq_number = irq_number.get();
+
+ if table[irq_number].is_some() {
@ -1411,8 +1407,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2.rs 14_exceptions
+ }
+
+ // Call the IRQ handler. Panic if there is none.
+ let mut r = &self.handler_table;
+ r.read(|table| {
+ self.handler_table.read(|table| {
+ match table[irq_number] {
+ None => panic!("No handler registered for IRQ {}", irq_number),
+ Some(descriptor) => {
@ -1431,8 +1426,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/arm/gicv2.rs 14_exceptions
+
+ info!(" Peripheral handler:");
+
+ let mut r = &self.handler_table;
+ r.read(|table| {
+ self.handler_table.read(|table| {
+ for (i, opt) in table.iter().skip(32).enumerate() {
+ if let Some(handler) = opt {
+ info!(" {: >3}. {}", i + 32, handler.name);
@ -1490,7 +1484,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_gpio.rs 14_exc
diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_interrupt_controller/peripheral_ic.rs 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_interrupt_controller/peripheral_ic.rs
--- 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_interrupt_controller/peripheral_ic.rs
+++ 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_interrupt_controller/peripheral_ic.rs
@@ -0,0 +1,167 @@
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
@ -1594,8 +1588,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_interrupt_cont
+ irq: Self::IRQNumberType,
+ descriptor: exception::asynchronous::IRQDescriptor,
+ ) -> Result<(), &'static str> {
+ let mut r = &self.handler_table;
+ r.write(|table| {
+ self.handler_table.write(|table| {
+ let irq_number = irq.get();
+
+ if table[irq_number].is_some() {
@ -1609,8 +1602,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_interrupt_cont
+ }
+
+ fn enable(&self, irq: Self::IRQNumberType) {
+ let mut r = &self.wo_registers;
+ r.lock(|regs| {
+ self.wo_registers.lock(|regs| {
+ let enable_reg = if irq.get() <= 31 {
+ &regs.ENABLE_1
+ } else {
@ -1629,8 +1621,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_interrupt_cont
+ &'irq_context self,
+ _ic: &exception::asynchronous::IRQContext<'irq_context>,
+ ) {
+ let mut r = &self.handler_table;
+ r.read(|table| {
+ self.handler_table.read(|table| {
+ for irq_number in self.pending_irqs() {
+ match table[irq_number] {
+ None => panic!("No handler registered for IRQ {}", irq_number),
@ -1648,8 +1639,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_interrupt_cont
+
+ info!(" Peripheral handler:");
+
+ let mut r = &self.handler_table;
+ r.read(|table| {
+ self.handler_table.read(|table| {
+ for (i, opt) in table.iter().enumerate() {
+ if let Some(handler) = opt {
+ info!(" {: >3}. {}", i, handler.name);
@ -1961,7 +1951,7 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
}
}
}
@@ -255,6 +346,21 @@
@@ -254,6 +345,21 @@
Ok(())
}
@ -1983,11 +1973,11 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
}
impl console::interface::Write for PL011Uart {
@@ -286,25 +392,7 @@
@@ -281,25 +387,8 @@
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
- r.lock(|inner| {
- self.inner.lock(|inner| {
- // Spin while RX FIFO empty is set.
- while inner.registers.FR.matches_all(FR::RXFE::SET) {
- cpu::nop();
@ -2006,19 +1996,19 @@ diff -uNr 13_integrated_testing/src/bsp/device_driver/bcm/bcm2xxx_pl011_uart.rs
-
- ret
- })
+ r.lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
+ self.inner
+ .lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
}
fn clear(&self) {
@@ -329,3 +417,25 @@
r.lock(|inner| inner.chars_read)
@@ -321,3 +410,24 @@
self.inner.lock(|inner| inner.chars_read)
}
}
+
+impl exception::asynchronous::interface::IRQHandler for PL011Uart {
+ fn handle(&self) -> Result<(), &'static str> {
+ let mut r = &self.inner;
+ r.lock(|inner| {
+ self.inner.lock(|inner| {
+ let pending = inner.registers.MIS.extract();
+
+ // Clear all pending IRQs.
@ -2613,9 +2603,9 @@ diff -uNr 13_integrated_testing/src/state.rs 14_exceptions_part2_peripheral_IRQs
diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_peripheral_IRQs/src/synchronization.rs
--- 13_integrated_testing/src/synchronization.rs
+++ 14_exceptions_part2_peripheral_IRQs/src/synchronization.rs
@@ -43,6 +43,21 @@
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
@@ -27,6 +27,21 @@
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
+
+ /// A reader-writer exclusion type.
@ -2627,43 +2617,46 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip
+ type Data;
+
+ /// Grants temporary mutable access to the encapsulated data.
+ fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
+ fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
+
+ /// Grants temporary immutable access to the encapsulated data.
+ fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R;
+ fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R;
+ }
}
/// A pseudo-lock for teaching purposes.
@@ -53,10 +68,17 @@
@@ -35,8 +50,18 @@
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
-/// executing single-threaded, aka only running on a single core with interrupts disabled.
-pub struct NullLock<T>
+/// executing on a single core.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
-pub struct NullLock<T: ?Sized> {
+pub struct IRQSafeNullLock<T: ?Sized> {
+pub struct IRQSafeNullLock<T>
+where
+ T: ?Sized,
+{
+ data: UnsafeCell<T>,
+}
+
+/// A pseudo-lock that is RW during the single-core kernel init phase and RO afterwards.
+///
+/// Intended to encapsulate data that is populated during kernel init when no concurrency exists.
+pub struct InitStateLock<T: ?Sized> {
data: UnsafeCell<T>,
}
@@ -64,10 +86,20 @@
+pub struct InitStateLock<T>
where
T: ?Sized,
{
@@ -47,10 +72,22 @@
// Public Code
//--------------------------------------------------------------------------------------------------
-unsafe impl<T: ?Sized> Sync for NullLock<T> {}
+unsafe impl<T: ?Sized> Sync for IRQSafeNullLock<T> {}
-unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
-unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
+unsafe impl<T> Send for IRQSafeNullLock<T> where T: ?Sized + Send {}
+unsafe impl<T> Sync for IRQSafeNullLock<T> where T: ?Sized + Send {}
+
+impl<T> IRQSafeNullLock<T> {
+ /// Wraps `data` into a new `IRQSafeNullLock`.
+ /// Create an instance.
+ pub const fn new(data: T) -> Self {
+ Self {
+ data: UnsafeCell::new(data),
@ -2671,26 +2664,26 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip
+ }
+}
+
+unsafe impl<T: ?Sized> Sync for InitStateLock<T> {}
+unsafe impl<T> Send for InitStateLock<T> where T: ?Sized + Send {}
+unsafe impl<T> Sync for InitStateLock<T> where T: ?Sized + Send {}
-impl<T> NullLock<T> {
- /// Wraps `data` into a new `NullLock`.
+impl<T> InitStateLock<T> {
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@@ -78,8 +110,9 @@
@@ -62,8 +99,9 @@
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
+use crate::{exception, state};
-impl<T> interface::Mutex for &NullLock<T> {
+impl<T> interface::Mutex for &IRQSafeNullLock<T> {
-impl<T> interface::Mutex for NullLock<T> {
+impl<T> interface::Mutex for IRQSafeNullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
@@ -87,6 +120,32 @@
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
@@ -71,6 +109,32 @@
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };
@ -2699,10 +2692,10 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip
+ }
+}
+
+impl<T> interface::ReadWriteEx for &InitStateLock<T> {
+impl<T> interface::ReadWriteEx for InitStateLock<T> {
+ type Data = T;
+
+ fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
+ fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
+ assert!(
+ state::state_manager().is_init(),
+ "InitStateLock::write called after kernel init phase"
@ -2717,7 +2710,7 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip
+ f(data)
+ }
+
+ fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R {
+ fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
+ let data = unsafe { &*self.data.get() };
+
f(data)

@ -51,7 +51,10 @@ impl DaifField for FIQ {
}
}
fn is_masked<T: DaifField>() -> bool {
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}

@ -158,8 +158,7 @@ impl exception::asynchronous::interface::IRQManager for GICv2 {
irq_number: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
let mut r = &self.handler_table;
r.write(|table| {
self.handler_table.write(|table| {
let irq_number = irq_number.get();
if table[irq_number].is_some() {
@ -190,8 +189,7 @@ impl exception::asynchronous::interface::IRQManager for GICv2 {
}
// Call the IRQ handler. Panic if there is none.
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
match table[irq_number] {
None => panic!("No handler registered for IRQ {}", irq_number),
Some(descriptor) => {
@ -210,8 +208,7 @@ impl exception::asynchronous::interface::IRQManager for GICv2 {
info!(" Peripheral handler:");
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
for (i, opt) in table.iter().skip(32).enumerate() {
if let Some(handler) = opt {
info!(" {: >3}. {}", i + 32, handler.name);

@ -151,8 +151,7 @@ impl GICD {
// Target all SPIs to the boot core only.
let mask = self.local_gic_target_mask();
let mut r = &self.shared_registers;
r.lock(|regs| {
self.shared_registers.lock(|regs| {
for i in regs.implemented_itargets_slice().iter() {
i.write(
ITARGETSR::Offset3.val(mask)
@ -186,8 +185,7 @@ impl GICD {
_ => {
let enable_reg_index_shared = enable_reg_index - 1;
let mut r = &self.shared_registers;
r.lock(|regs| {
self.shared_registers.lock(|regs| {
let enable_reg = &regs.ISENABLER[enable_reg_index_shared];
enable_reg.set(enable_reg.get() | enable_bit);
});

@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@ -101,8 +101,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
irq: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
let mut r = &self.handler_table;
r.write(|table| {
self.handler_table.write(|table| {
let irq_number = irq.get();
if table[irq_number].is_some() {
@ -116,8 +115,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
}
fn enable(&self, irq: Self::IRQNumberType) {
let mut r = &self.wo_registers;
r.lock(|regs| {
self.wo_registers.lock(|regs| {
let enable_reg = if irq.get() <= 31 {
&regs.ENABLE_1
} else {
@ -136,8 +134,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
&'irq_context self,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
for irq_number in self.pending_irqs() {
match table[irq_number] {
None => panic!("No handler registered for IRQ {}", irq_number),
@ -155,8 +152,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
info!(" Peripheral handler:");
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
for (i, opt) in table.iter().enumerate() {
if let Some(handler) = opt {
info!(" {: >3}. {}", i, handler.name);

@ -341,8 +341,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@ -367,21 +366,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
// Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -391,13 +387,12 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
self.inner
.lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -408,20 +403,17 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}
impl exception::asynchronous::interface::IRQHandler for PL011Uart {
fn handle(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
let pending = inner.registers.MIS.extract();
// Clear all pending IRQs.

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,35 +18,14 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
/// A reader-writer exclusion type.
@ -53,32 +37,34 @@ pub mod interface {
type Data;
/// Grants temporary mutable access to the encapsulated data.
fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Grants temporary immutable access to the encapsulated data.
fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R;
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing on a single core.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct IRQSafeNullLock<T: ?Sized> {
pub struct IRQSafeNullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
/// A pseudo-lock that is RW during the single-core kernel init phase and RO afterwards.
///
/// Intended to encapsulate data that is populated during kernel init when no concurrency exists.
pub struct InitStateLock<T: ?Sized> {
pub struct InitStateLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -86,10 +72,11 @@ pub struct InitStateLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for IRQSafeNullLock<T> {}
unsafe impl<T> Send for IRQSafeNullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for IRQSafeNullLock<T> where T: ?Sized + Send {}
impl<T> IRQSafeNullLock<T> {
/// Wraps `data` into a new `IRQSafeNullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -97,9 +84,11 @@ impl<T> IRQSafeNullLock<T> {
}
}
unsafe impl<T: ?Sized> Sync for InitStateLock<T> {}
unsafe impl<T> Send for InitStateLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for InitStateLock<T> where T: ?Sized + Send {}
impl<T> InitStateLock<T> {
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -112,10 +101,10 @@ impl<T> InitStateLock<T> {
//------------------------------------------------------------------------------
use crate::{exception, state};
impl<T> interface::Mutex for &IRQSafeNullLock<T> {
impl<T> interface::Mutex for IRQSafeNullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };
@ -125,10 +114,10 @@ impl<T> interface::Mutex for &IRQSafeNullLock<T> {
}
}
impl<T> interface::ReadWriteEx for &InitStateLock<T> {
impl<T> interface::ReadWriteEx for InitStateLock<T> {
type Data = T;
fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
assert!(
state::state_manager().is_init(),
"InitStateLock::write called after kernel init phase"
@ -143,7 +132,7 @@ impl<T> interface::ReadWriteEx for &InitStateLock<T> {
f(data)
}
fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R {
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
let data = unsafe { &*self.data.get() };
f(data)

@ -211,8 +211,7 @@ unsafe fn init(&self) -> Result<(), &'static str> {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?;
let mut r = &self.inner;
r.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
self.inner.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
// omitted for brevity.
@ -834,54 +833,51 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gi
}
+ pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
+ let mut r = &self.registers;
+ r.write(|regs| *regs = Registers::new(new_mmio_start_addr));
+ self.registers
+ .write(|regs| *regs = Registers::new(new_mmio_start_addr));
+ }
+
/// Accept interrupts of any priority.
///
/// Quoting the GICv2 Architecture Specification:
@@ -87,7 +95,10 @@
@@ -87,7 +95,9 @@
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
pub fn priority_accept_all(&self) {
- self.registers.PMR.write(PMR::Priority.val(255)); // Comment in arch spec.
+ let mut r = &self.registers;
+ r.read(|regs| {
+ self.registers.read(|regs| {
+ regs.PMR.write(PMR::Priority.val(255)); // Comment in arch spec.
+ });
}
/// Enable the interface - start accepting IRQs.
@@ -97,7 +108,10 @@
@@ -97,7 +107,9 @@
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
pub fn enable(&self) {
- self.registers.CTLR.write(CTLR::Enable::SET);
+ let mut r = &self.registers;
+ r.read(|regs| {
+ self.registers.read(|regs| {
+ regs.CTLR.write(CTLR::Enable::SET);
+ });
}
/// Extract the number of the highest-priority pending IRQ.
@@ -113,7 +127,8 @@
@@ -113,7 +125,8 @@
&self,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) -> usize {
- self.registers.IAR.read(IAR::InterruptID) as usize
+ let mut r = &self.registers;
+ r.read(|regs| regs.IAR.read(IAR::InterruptID) as usize)
+ self.registers
+ .read(|regs| regs.IAR.read(IAR::InterruptID) as usize)
}
/// Complete handling of the currently active IRQ.
@@ -132,6 +147,9 @@
@@ -132,6 +145,8 @@
irq_number: u32,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
- self.registers.EOIR.write(EOIR::EOIINTID.val(irq_number));
+ let mut r = &self.registers;
+ r.read(|regs| {
+ self.registers.read(|regs| {
+ regs.EOIR.write(EOIR::EOIINTID.val(irq_number));
+ });
}
@ -919,7 +915,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gi
use synchronization::interface::Mutex;
impl GICD {
@@ -127,10 +129,18 @@
@@ -127,10 +129,17 @@
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
Self {
shared_registers: IRQSafeNullLock::new(SharedRegisters::new(mmio_start_addr)),
@ -929,40 +925,39 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gi
}
+ pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
+ let mut r = &self.shared_registers;
+ r.lock(|regs| *regs = SharedRegisters::new(new_mmio_start_addr));
+
+ let mut r = &self.banked_registers;
+ r.write(|regs| *regs = BankedRegisters::new(new_mmio_start_addr));
+ self.shared_registers
+ .lock(|regs| *regs = SharedRegisters::new(new_mmio_start_addr));
+ self.banked_registers
+ .write(|regs| *regs = BankedRegisters::new(new_mmio_start_addr));
+ }
+
/// Use a banked ITARGETSR to retrieve the executing core's GIC target mask.
///
/// Quoting the GICv2 Architecture Specification:
@@ -138,7 +148,8 @@
@@ -138,7 +147,8 @@
/// "GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only, and each field returns a value that
/// corresponds only to the processor reading the register."
fn local_gic_target_mask(&self) -> u32 {
- self.banked_registers.ITARGETSR[0].read(ITARGETSR::Offset0)
+ let mut r = &self.banked_registers;
+ r.read(|regs| regs.ITARGETSR[0].read(ITARGETSR::Offset0))
+ self.banked_registers
+ .read(|regs| regs.ITARGETSR[0].read(ITARGETSR::Offset0))
}
/// Route all SPIs to the boot core and enable the distributor.
@@ -179,8 +190,11 @@
@@ -177,10 +187,10 @@
// Check if we are handling a private or shared IRQ.
match irq_num {
// Private.
0..=31 => {
- 0..=31 => {
- let enable_reg = &self.banked_registers.ISENABLER;
- enable_reg.set(enable_reg.get() | enable_bit);
+ let mut r = &self.banked_registers;
+ r.read(|regs| {
+ let enable_reg = &regs.ISENABLER;
+ enable_reg.set(enable_reg.get() | enable_bit);
+ })
}
+ 0..=31 => self.banked_registers.read(|regs| {
+ let enable_reg = &regs.ISENABLER;
enable_reg.set(enable_reg.get() | enable_bit);
- }
+ }),
// Shared.
_ => {
let enable_reg_index_shared = enable_reg_index - 1;
diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2.rs 15_virtual_mem_part2_mmio_remap/src/bsp/device_driver/arm/gicv2.rs
--- 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2.rs
@ -1108,7 +1103,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
}
}
@@ -213,4 +233,27 @@
@@ -212,4 +232,27 @@
fn compatible(&self) -> &'static str {
"BCM GPIO"
}
@ -1117,8 +1112,8 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
+ let virt_addr =
+ memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?;
+
+ let mut r = &self.inner;
+ r.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
+ self.inner
+ .lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
+
+ self.virt_mmio_start_addr
+ .store(virt_addr.into_usize(), Ordering::Relaxed);
@ -1172,7 +1167,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
/// Stores registered IRQ handlers. Writable only during kernel init. RO afterwards.
handler_table: InitStateLock<HandlerTable>,
@@ -70,21 +74,27 @@
@@ -70,21 +74,26 @@
///
/// # Safety
///
@ -1196,8 +1191,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
fn pending_irqs(&self) -> PendingIRQs {
- let pending_mask: u64 = (u64::from(self.ro_registers.PENDING_2.get()) << 32)
- | u64::from(self.ro_registers.PENDING_1.get());
+ let mut r = &self.ro_registers;
+ r.read(|regs| {
+ self.ro_registers.read(|regs| {
+ let pending_mask: u64 =
+ (u64::from(regs.PENDING_2.get()) << 32) | u64::from(regs.PENDING_1.get());
@ -1207,7 +1201,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
}
}
@@ -93,6 +103,26 @@
@@ -93,6 +102,25 @@
//------------------------------------------------------------------------------
use synchronization::interface::{Mutex, ReadWriteEx};
@ -1221,11 +1215,10 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
+ memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?
+ .into_usize();
+
+ let mut r = &self.wo_registers;
+ r.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));
+
+ let mut r = &self.ro_registers;
+ r.write(|regs| *regs = ReadOnlyRegisters::new(virt_addr));
+ self.wo_registers
+ .lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));
+ self.ro_registers
+ .write(|regs| *regs = ReadOnlyRegisters::new(virt_addr));
+
+ Ok(())
+ }
@ -1353,23 +1346,23 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/bcm/bcm2xxx_
irq_number,
}
}
@@ -341,8 +361,14 @@
@@ -341,7 +361,14 @@
}
unsafe fn init(&self) -> Result<(), &'static str> {
- self.inner.lock(|inner| inner.init());
+ let virt_addr =
+ memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?;
+
let mut r = &self.inner;
- r.lock(|inner| inner.init());
+ r.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
+ self.inner
+ .lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
+
+ self.virt_mmio_start_addr
+ .store(virt_addr.into_usize(), Ordering::Relaxed);
Ok(())
}
@@ -361,6 +387,16 @@
@@ -360,6 +387,16 @@
Ok(())
}
@ -2090,7 +2083,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/main.rs 15_virtual_mem_part2_m
diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 15_virtual_mem_part2_mmio_remap/src/memory/mmu/mapping_record.rs
--- 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs
+++ 15_virtual_mem_part2_mmio_remap/src/memory/mmu/mapping_record.rs
@@ -0,0 +1,224 @@
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
@ -2288,8 +2281,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1
+ virt_pages: &PageSliceDescriptor<Virtual>,
+ attr: &AttributeFields,
+) -> Result<(), &'static str> {
+ let mut m = &KERNEL_MAPPING_RECORD;
+ m.write(|mr| mr.add(name, phys_pages, virt_pages, attr))
+ KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, phys_pages, virt_pages, attr))
+}
+
+pub fn kernel_find_and_insert_mmio_duplicate(
@ -2298,8 +2290,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1
+) -> Option<Address<Virtual>> {
+ let phys_pages: PageSliceDescriptor<Physical> = phys_mmio_descriptor.clone().into();
+
+ let mut m = &KERNEL_MAPPING_RECORD;
+ m.write(|mr| {
+ KERNEL_MAPPING_RECORD.write(|mr| {
+ let dup = mr.find_duplicate(&phys_pages)?;
+
+ if let Err(x) = dup.add_user(new_user) {
@ -2312,8 +2303,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1
+
+/// Human-readable print of all recorded kernel mappings.
+pub fn kernel_print() {
+ let mut m = &KERNEL_MAPPING_RECORD;
+ m.read(|mr| mr.print());
+ KERNEL_MAPPING_RECORD.read(|mr| mr.print());
+}
diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs

@ -51,7 +51,10 @@ impl DaifField for FIQ {
}
}
fn is_masked<T: DaifField>() -> bool {
fn is_masked<T>() -> bool
where
T: DaifField,
{
DAIF.is_set(T::daif_field())
}

@ -190,8 +190,7 @@ impl exception::asynchronous::interface::IRQManager for GICv2 {
irq_number: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
let mut r = &self.handler_table;
r.write(|table| {
self.handler_table.write(|table| {
let irq_number = irq_number.get();
if table[irq_number].is_some() {
@ -222,8 +221,7 @@ impl exception::asynchronous::interface::IRQManager for GICv2 {
}
// Call the IRQ handler. Panic if there is none.
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
match table[irq_number] {
None => panic!("No handler registered for IRQ {}", irq_number),
Some(descriptor) => {
@ -242,8 +240,7 @@ impl exception::asynchronous::interface::IRQManager for GICv2 {
info!(" Peripheral handler:");
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
for (i, opt) in table.iter().skip(32).enumerate() {
if let Some(handler) = opt {
info!(" {: >3}. {}", i + 32, handler.name);

@ -79,8 +79,8 @@ impl GICC {
}
pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
let mut r = &self.registers;
r.write(|regs| *regs = Registers::new(new_mmio_start_addr));
self.registers
.write(|regs| *regs = Registers::new(new_mmio_start_addr));
}
/// Accept interrupts of any priority.
@ -95,8 +95,7 @@ impl GICC {
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
pub fn priority_accept_all(&self) {
let mut r = &self.registers;
r.read(|regs| {
self.registers.read(|regs| {
regs.PMR.write(PMR::Priority.val(255)); // Comment in arch spec.
});
}
@ -108,8 +107,7 @@ impl GICC {
/// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
/// of `&mut self`.
pub fn enable(&self) {
let mut r = &self.registers;
r.read(|regs| {
self.registers.read(|regs| {
regs.CTLR.write(CTLR::Enable::SET);
});
}
@ -127,8 +125,8 @@ impl GICC {
&self,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) -> usize {
let mut r = &self.registers;
r.read(|regs| regs.IAR.read(IAR::InterruptID) as usize)
self.registers
.read(|regs| regs.IAR.read(IAR::InterruptID) as usize)
}
/// Complete handling of the currently active IRQ.
@ -147,8 +145,7 @@ impl GICC {
irq_number: u32,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
let mut r = &self.registers;
r.read(|regs| {
self.registers.read(|regs| {
regs.EOIR.write(EOIR::EOIINTID.val(irq_number));
});
}

@ -134,11 +134,10 @@ impl GICD {
}
pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
let mut r = &self.shared_registers;
r.lock(|regs| *regs = SharedRegisters::new(new_mmio_start_addr));
let mut r = &self.banked_registers;
r.write(|regs| *regs = BankedRegisters::new(new_mmio_start_addr));
self.shared_registers
.lock(|regs| *regs = SharedRegisters::new(new_mmio_start_addr));
self.banked_registers
.write(|regs| *regs = BankedRegisters::new(new_mmio_start_addr));
}
/// Use a banked ITARGETSR to retrieve the executing core's GIC target mask.
@ -148,8 +147,8 @@ impl GICD {
/// "GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only, and each field returns a value that
/// corresponds only to the processor reading the register."
fn local_gic_target_mask(&self) -> u32 {
let mut r = &self.banked_registers;
r.read(|regs| regs.ITARGETSR[0].read(ITARGETSR::Offset0))
self.banked_registers
.read(|regs| regs.ITARGETSR[0].read(ITARGETSR::Offset0))
}
/// Route all SPIs to the boot core and enable the distributor.
@ -162,8 +161,7 @@ impl GICD {
// Target all SPIs to the boot core only.
let mask = self.local_gic_target_mask();
let mut r = &self.shared_registers;
r.lock(|regs| {
self.shared_registers.lock(|regs| {
for i in regs.implemented_itargets_slice().iter() {
i.write(
ITARGETSR::Offset3.val(mask)
@ -189,19 +187,15 @@ impl GICD {
// Check if we are handling a private or shared IRQ.
match irq_num {
// Private.
0..=31 => {
let mut r = &self.banked_registers;
r.read(|regs| {
let enable_reg = &regs.ISENABLER;
enable_reg.set(enable_reg.get() | enable_bit);
})
}
0..=31 => self.banked_registers.read(|regs| {
let enable_reg = &regs.ISENABLER;
enable_reg.set(enable_reg.get() | enable_bit);
}),
// Shared.
_ => {
let enable_reg_index_shared = enable_reg_index - 1;
let mut r = &self.shared_registers;
r.lock(|regs| {
self.shared_registers.lock(|regs| {
let enable_reg = &regs.ISENABLER[enable_reg_index_shared];
enable_reg.set(enable_reg.get() | enable_bit);
});

@ -219,8 +219,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}
@ -238,8 +237,8 @@ impl driver::interface::DeviceDriver for GPIO {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?;
let mut r = &self.inner;
r.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);

@ -88,8 +88,7 @@ impl PeripheralIC {
/// Query the list of pending IRQs.
fn pending_irqs(&self) -> PendingIRQs {
let mut r = &self.ro_registers;
r.read(|regs| {
self.ro_registers.read(|regs| {
let pending_mask: u64 =
(u64::from(regs.PENDING_2.get()) << 32) | u64::from(regs.PENDING_1.get());
@ -113,11 +112,10 @@ impl driver::interface::DeviceDriver for PeripheralIC {
memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?
.into_usize();
let mut r = &self.wo_registers;
r.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));
let mut r = &self.ro_registers;
r.write(|regs| *regs = ReadOnlyRegisters::new(virt_addr));
self.wo_registers
.lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));
self.ro_registers
.write(|regs| *regs = ReadOnlyRegisters::new(virt_addr));
Ok(())
}
@ -131,8 +129,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
irq: Self::IRQNumberType,
descriptor: exception::asynchronous::IRQDescriptor,
) -> Result<(), &'static str> {
let mut r = &self.handler_table;
r.write(|table| {
self.handler_table.write(|table| {
let irq_number = irq.get();
if table[irq_number].is_some() {
@ -146,8 +143,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
}
fn enable(&self, irq: Self::IRQNumberType) {
let mut r = &self.wo_registers;
r.lock(|regs| {
self.wo_registers.lock(|regs| {
let enable_reg = if irq.get() <= 31 {
&regs.ENABLE_1
} else {
@ -166,8 +162,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
&'irq_context self,
_ic: &exception::asynchronous::IRQContext<'irq_context>,
) {
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
for irq_number in self.pending_irqs() {
match table[irq_number] {
None => panic!("No handler registered for IRQ {}", irq_number),
@ -185,8 +180,7 @@ impl exception::asynchronous::interface::IRQManager for PeripheralIC {
info!(" Peripheral handler:");
let mut r = &self.handler_table;
r.read(|table| {
self.handler_table.read(|table| {
for (i, opt) in table.iter().enumerate() {
if let Some(handler) = opt {
info!(" {: >3}. {}", i, handler.name);

@ -364,8 +364,8 @@ impl driver::interface::DeviceDriver for PL011Uart {
let virt_addr =
memory::mmu::kernel_map_mmio(self.compatible(), &self.phys_mmio_descriptor)?;
let mut r = &self.inner;
r.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
self.inner
.lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
self.virt_mmio_start_addr
.store(virt_addr.into_usize(), Ordering::Relaxed);
@ -403,21 +403,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
// Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@ -427,13 +424,12 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
self.inner
.lock(|inner| inner.read_char_converting(BlockingMode::Blocking).unwrap())
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@ -444,20 +440,17 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}
impl exception::asynchronous::interface::IRQHandler for PL011Uart {
fn handle(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
let pending = inner.registers.MIS.extract();
// Clear all pending IRQs.

@ -195,8 +195,7 @@ pub fn kernel_add(
virt_pages: &PageSliceDescriptor<Virtual>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let mut m = &KERNEL_MAPPING_RECORD;
m.write(|mr| mr.add(name, phys_pages, virt_pages, attr))
KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, phys_pages, virt_pages, attr))
}
pub fn kernel_find_and_insert_mmio_duplicate(
@ -205,8 +204,7 @@ pub fn kernel_find_and_insert_mmio_duplicate(
) -> Option<Address<Virtual>> {
let phys_pages: PageSliceDescriptor<Physical> = phys_mmio_descriptor.clone().into();
let mut m = &KERNEL_MAPPING_RECORD;
m.write(|mr| {
KERNEL_MAPPING_RECORD.write(|mr| {
let dup = mr.find_duplicate(&phys_pages)?;
if let Err(x) = dup.add_user(new_user) {
@ -219,6 +217,5 @@ pub fn kernel_find_and_insert_mmio_duplicate(
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print() {
let mut m = &KERNEL_MAPPING_RECORD;
m.read(|mr| mr.print());
KERNEL_MAPPING_RECORD.read(|mr| mr.print());
}

@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@ -13,35 +18,14 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
/// A reader-writer exclusion type.
@ -53,32 +37,34 @@ pub mod interface {
type Data;
/// Grants temporary mutable access to the encapsulated data.
fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Grants temporary immutable access to the encapsulated data.
fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R;
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing on a single core.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct IRQSafeNullLock<T: ?Sized> {
pub struct IRQSafeNullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
/// A pseudo-lock that is RW during the single-core kernel init phase and RO afterwards.
///
/// Intended to encapsulate data that is populated during kernel init when no concurrency exists.
pub struct InitStateLock<T: ?Sized> {
pub struct InitStateLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
@ -86,10 +72,11 @@ pub struct InitStateLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for IRQSafeNullLock<T> {}
unsafe impl<T> Send for IRQSafeNullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for IRQSafeNullLock<T> where T: ?Sized + Send {}
impl<T> IRQSafeNullLock<T> {
/// Wraps `data` into a new `IRQSafeNullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -97,9 +84,11 @@ impl<T> IRQSafeNullLock<T> {
}
}
unsafe impl<T: ?Sized> Sync for InitStateLock<T> {}
unsafe impl<T> Send for InitStateLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for InitStateLock<T> where T: ?Sized + Send {}
impl<T> InitStateLock<T> {
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@ -112,10 +101,10 @@ impl<T> InitStateLock<T> {
//------------------------------------------------------------------------------
use crate::{exception, state};
impl<T> interface::Mutex for &IRQSafeNullLock<T> {
impl<T> interface::Mutex for IRQSafeNullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };
@@ -125,10 +114,10 @@ impl<T> interface::Mutex for &IRQSafeNullLock<T> {
}
}
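The comment inside `lock()` above notes that a real lock would wrap the `UnsafeCell` access in code that enforces exclusivity. As a rough sketch of what such encapsulation could look like on a multi-core system (illustrative only, not part of the tutorial at this stage; all names are made up):

```rust
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicBool, Ordering};

/// A spinning mutex that actually enforces the single-`&mut` guarantee.
pub struct SpinLock<T> {
    locked: AtomicBool,
    data: UnsafeCell<T>,
}

// SAFETY: Exclusive access to `data` is enforced by the `locked` flag.
unsafe impl<T: Send> Sync for SpinLock<T> {}

impl<T> SpinLock<T> {
    pub const fn new(data: T) -> Self {
        Self {
            locked: AtomicBool::new(false),
            data: UnsafeCell::new(data),
        }
    }

    pub fn lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        // Spin until the flag flips from `false` to `true`.
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {}

        // Only the core that set the flag reaches this point.
        let result = f(unsafe { &mut *self.data.get() });

        self.locked.store(false, Ordering::Release);
        result
    }
}
```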
impl<T> interface::ReadWriteEx for &InitStateLock<T> {
impl<T> interface::ReadWriteEx for InitStateLock<T> {
type Data = T;
fn write<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
assert!(
state::state_manager().is_init(),
"InitStateLock::write called after kernel init phase"
@@ -143,7 +132,7 @@ impl<T> interface::ReadWriteEx for &InitStateLock<T> {
f(data)
}
fn read<R>(&mut self, f: impl FnOnce(&Self::Data) -> R) -> R {
fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
let data = unsafe { &*self.data.get() };
f(data)

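For completeness, a usage sketch of `InitStateLock` through the `&self`-based `ReadWriteEx` trait (the static and the functions are hypothetical; the `write()` implementation above asserts that the kernel is still in its init phase):

```rust
use crate::synchronization::{interface::ReadWriteEx, InitStateLock};

// Hypothetical data that is populated once during single-core kernel init.
static LAYOUT: InitStateLock<[usize; 4]> = InitStateLock::new([0; 4]);

fn populate_during_init() {
    // Panics via the assert in `write()` if called after the init phase.
    LAYOUT.write(|entries| entries[0] = 0xCAFE);
}

fn lookup_after_init() -> usize {
    LAYOUT.read(|entries| entries[0])
}
```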
@@ -199,8 +199,7 @@ impl GPIO {
/// Concurrency safe version of `GPIOInner.map_pl011_uart()`
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| inner.map_pl011_uart())
self.inner.lock(|inner| inner.map_pl011_uart())
}
}

@@ -250,8 +250,7 @@ impl driver::interface::DeviceDriver for PL011Uart {
}
unsafe fn init(&self) -> Result<(), &'static str> {
let mut r = &self.inner;
r.lock(|inner| inner.init());
self.inner.lock(|inner| inner.init());
Ok(())
}
@@ -261,21 +260,18 @@ impl console::interface::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
self.inner.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
// Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
// Spin until TX FIFO empty is set.
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
while !inner.registers.FR.matches_all(FR::TXFE::SET) {
cpu::nop();
}
@@ -285,8 +281,7 @@ impl console::interface::Write for PL011Uart {
impl console::interface::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.registers.FR.matches_all(FR::RXFE::SET) {
cpu::nop();
@@ -308,8 +303,7 @@ impl console::interface::Read for PL011Uart {
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
self.inner.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.registers.FR.matches_all(FR::RXFE::SET) {
inner.registers.DR.get();
@@ -320,12 +314,10 @@ impl console::interface::Read for PL011Uart {
impl console::interface::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
self.inner.lock(|inner| inner.chars_written)
}
fn chars_read(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_read)
self.inner.lock(|inner| inner.chars_read)
}
}
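A hypothetical call site for the `Statistics` interface above; reading the counters now needs nothing but a shared reference. The `bsp::console::console()` accessor and the `println!` macro follow the tutorial's conventions but are not part of this diff:

```rust
use crate::console::interface::Statistics;

fn report_console_usage() {
    crate::println!(
        "Chars written: {}, chars read: {}",
        crate::bsp::console::console().chars_written(),
        crate::bsp::console::console().chars_read(),
    );
}
```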

@@ -3,6 +3,11 @@
// Copyright (c) 2020 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! Suggested literature:
//! - https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html
//! - https://stackoverflow.com/questions/59428096/understanding-the-send-trait
//! - https://doc.rust-lang.org/std/cell/index.html
use core::cell::UnsafeCell;
@@ -13,50 +18,28 @@ use core::cell::UnsafeCell;
/// Synchronization interfaces.
pub mod interface {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// Any object implementing this trait guarantees exclusive access to the data wrapped within
/// the Mutex for the duration of the provided closure.
///
/// The trait follows the [Rust embedded WG's proposal] and therefore provides some goodness
/// such as [deadlock prevention].
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// [Rust embedded WG's proposal]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md
/// [deadlock prevention]: https://github.com/rust-embedded/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// The type of encapsulated data.
/// The type of the data that is wrapped by this mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
/// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
pub struct NullLock<T>
where
T: ?Sized,
{
data: UnsafeCell<T>,
}
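As an aside (my analogy, not part of the commit): the closure-based `lock()` shape maps directly onto a guard-based lock, which can be handy for experimenting with the pattern in host Rust where `std` is available:

```rust
use std::sync::Mutex;

// Same call shape as the kernel's `Mutex` trait, backed by a real standard-library lock.
fn lock_with<T, R>(m: &Mutex<T>, f: impl FnOnce(&mut T) -> R) -> R {
    let mut guard = m.lock().expect("poisoned");
    f(&mut *guard)
}

fn main() {
    let counter = Mutex::new(0u64);

    lock_with(&counter, |data| *data += 1);
    assert_eq!(lock_with(&counter, |data| *data), 1);
}
```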
@@ -64,10 +47,11 @@ pub struct NullLock<T: ?Sized> {
// Public Code
//--------------------------------------------------------------------------------------------------
unsafe impl<T: ?Sized> Sync for NullLock<T> {}
unsafe impl<T> Send for NullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for NullLock<T> where T: ?Sized + Send {}
impl<T> NullLock<T> {
/// Wraps `data` into a new `NullLock`.
/// Create an instance.
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
@@ -79,10 +63,10 @@ impl<T> NullLock<T> {
// OS Interface Code
//------------------------------------------------------------------------------
impl<T> interface::Mutex for &NullLock<T> {
impl<T> interface::Mutex for NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference is only ever given out once at a time.
let data = unsafe { &mut *self.data.get() };
