Remove UB in linker script symbol <-> Rust handover

pull/84/head
Andre Richter 4 years ago
parent 8fc250fc08
commit 27a1d10cc3
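The gist of the change, as a standalone sketch (illustration only, not part of the commit message; the types and the `extern "Rust"` block mirror the diffs below):

```rust
use core::cell::UnsafeCell;
use core::ops::RangeInclusive;

// Before: the symbol was declared as a `usize` *value* living at the
// symbol's address. Taking `&__bss_start` and casting it materializes a
// reference, which asserts that a valid, initialized `usize` exists
// there -- an assumption the linker script never guarantees.
//
// extern "C" {
//     static __bss_start: usize;
// }
//
// After: the symbol is typed `UnsafeCell<u64>`, so the access goes
// through `.get()`, which hands out a raw `*mut u64` that is only ever
// used as an address -- no reference to the pointee value is created.
extern "Rust" {
    static __bss_start: UnsafeCell<u64>;
    static __bss_end_inclusive: UnsafeCell<u64>;
}

fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
}
```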

@@ -68,7 +68,7 @@ diff -uNr 01_wait_forever/src/bsp/raspberrypi/link.ld 02_runtime_init/src/bsp/raspberrypi/link.ld
 +    __bss_start = .;
 +    *(.bss*);
 +    . = ALIGN(8);
-+    __bss_end = .;
++    __bss_end_inclusive = . - 8;
 +}
 +
  /DISCARD/ : { *(.comment*) }
@@ -77,42 +77,37 @@ diff -uNr 01_wait_forever/src/bsp/raspberrypi/link.ld 02_runtime_init/src/bsp/raspberrypi/link.ld
 diff -uNr 01_wait_forever/src/bsp/raspberrypi/memory.rs 02_runtime_init/src/bsp/raspberrypi/memory.rs
 --- 01_wait_forever/src/bsp/raspberrypi/memory.rs
 +++ 02_runtime_init/src/bsp/raspberrypi/memory.rs
-@@ -0,0 +1,36 @@
+@@ -0,0 +1,31 @@
 +// SPDX-License-Identifier: MIT OR Apache-2.0
 +//
 +// Copyright (c) 2018-2020 Andre Richter <andre.o.richter@gmail.com>
 +
 +//! BSP Memory Management.
 +
-+use core::ops::Range;
++use core::{cell::UnsafeCell, ops::RangeInclusive};
 +
 +//--------------------------------------------------------------------------------------------------
 +// Private Definitions
 +//--------------------------------------------------------------------------------------------------
 +
 +// Symbols from the linker script.
-+extern "C" {
-+    static __bss_start: usize;
-+    static __bss_end: usize;
++extern "Rust" {
++    static __bss_start: UnsafeCell<u64>;
++    static __bss_end_inclusive: UnsafeCell<u64>;
 +}
 +
 +//--------------------------------------------------------------------------------------------------
 +// Public Code
 +//--------------------------------------------------------------------------------------------------
 +
-+/// Return the range spanning the .bss section.
++/// Return the inclusive range spanning the .bss section.
 +///
 +/// # Safety
 +///
-+/// - Values are provided by the linker script and must be trusted as-is.
-+pub fn bss_range() -> Range<*mut u64> {
-+    unsafe {
-+        Range {
-+            start: &__bss_start as *const _ as *mut u64,
-+            end: &__bss_end as *const _ as *mut u64,
-+        }
-+    }
++/// - The linker-provided addresses must be u64 aligned.
++pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
++    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 +}
 diff -uNr 01_wait_forever/src/bsp/raspberrypi.rs 02_runtime_init/src/bsp/raspberrypi.rs
@@ -155,34 +150,39 @@ diff -uNr 01_wait_forever/src/main.rs 02_runtime_init/src/main.rs
 diff -uNr 01_wait_forever/src/memory.rs 02_runtime_init/src/memory.rs
 --- 01_wait_forever/src/memory.rs
 +++ 02_runtime_init/src/memory.rs
-@@ -0,0 +1,29 @@
+@@ -0,0 +1,34 @@
 +// SPDX-License-Identifier: MIT OR Apache-2.0
 +//
 +// Copyright (c) 2018-2020 Andre Richter <andre.o.richter@gmail.com>
 +
 +//! Memory Management.
 +
-+use core::ops::Range;
++use core::ops::RangeInclusive;
 +
 +//--------------------------------------------------------------------------------------------------
 +// Public Code
 +//--------------------------------------------------------------------------------------------------
 +
-+/// Zero out a memory range.
++/// Zero out an inclusive memory range.
 +///
 +/// # Safety
 +///
 +/// - `range.start` and `range.end` must be valid.
 +/// - `range.start` and `range.end` must be `T` aligned.
-+pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
++pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 +where
 +    T: From<u8>,
 +{
-+    let mut ptr = range.start;
++    let mut ptr = *range.start();
++    let end_inclusive = *range.end();
 +
-+    while ptr < range.end {
++    loop {
 +        core::ptr::write_volatile(ptr, T::from(0));
 +        ptr = ptr.offset(1);
++
++        if ptr > end_inclusive {
++            break;
++        }
 +    }
 +}
@@ -209,7 +209,7 @@ diff -uNr 01_wait_forever/src/runtime_init.rs 02_runtime_init/src/runtime_init.rs
 +/// - Must only be called pre `kernel_init()`.
 +#[inline(always)]
 +unsafe fn zero_bss() {
-+    memory::zero_volatile(bsp::memory::bss_range());
++    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 +}
 +
 +//--------------------------------------------------------------------------------------------------

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }
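On the linker side, instead of exporting the one-past-the-end address, the script now exports `. - 8`: the address of the last `u64` word of `.bss` (the preceding `ALIGN(8)` keeps the end 8-byte aligned). A one-line sketch of the relationship, assuming a non-empty `.bss`:

```rust
// Illustration only: `__bss_end_inclusive` equals the old exclusive
// `__bss_end` stepped back by one u64 element, i.e. the script's `. - 8`.
fn end_inclusive(end_exclusive: *mut u64) -> *mut u64 {
    unsafe { end_exclusive.offset(-1) }
}
```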

@@ -4,33 +4,28 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }
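For reference, a usage sketch of the inclusive variant. It mirrors the `zero_volatile_works` kernel test that tutorial 13 gains further down, and assumes the `zero_volatile` from the hunk above is in scope:

```rust
use core::ops::RangeInclusive;

fn demo() {
    let mut x: [usize; 3] = [10, 11, 12];

    // `as_mut_ptr_range()` yields an exclusive range; step the end back by
    // one element to build the inclusive range `zero_volatile` now expects.
    let x_range = x.as_mut_ptr_range();
    let x_range_inclusive =
        RangeInclusive::new(x_range.start, unsafe { x_range.end.offset(-1) });

    unsafe { zero_volatile(x_range_inclusive) };

    assert_eq!(x, [0, 0, 0]);
}
```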

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,33 +4,28 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -180,7 +180,7 @@ diff -uNr 03_hacky_hello_world/src/bsp/raspberrypi/memory.rs 04_zero_overhead_abstraction/src/bsp/raspberrypi/memory.rs
 +    map::BOOT_CORE_STACK_END
 +}
 +
- /// Return the range spanning the .bss section.
+ /// Return the inclusive range spanning the .bss section.
  ///
  /// # Safety

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -36,17 +36,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -36,17 +36,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -59,17 +59,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -263,7 +263,7 @@ diff -uNr 06_drivers_gpio_uart/src/bsp/raspberrypi/link.ld 07_uart_chainloader/src/bsp/raspberrypi/link.ld
      {
          *(.text._start) *(.text*)
 @@ -32,5 +33,14 @@
-     __bss_end = .;
+     __bss_end_inclusive = . - 8;
  }
 +    .got :

@@ -307,7 +307,7 @@ diff -uNr 06_drivers_gpio_uart/src/bsp/raspberrypi/memory.rs 07_uart_chainloader/src/bsp/raspberrypi/memory.rs
 +    map::BOARD_DEFAULT_LOAD_ADDRESS
 +}
 +
- /// Return the range spanning the .bss section.
+ /// Return the inclusive range spanning the .bss section.
  ///
  /// # Safety

@@ -30,7 +30,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

     .got :

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -67,17 +67,12 @@ pub fn board_default_load_addr() -> usize {
     map::BOARD_DEFAULT_LOAD_ADDRESS
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -51,7 +51,7 @@ impl RunTimeInit for Traitor {}
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -263,7 +263,7 @@ diff -uNr 07_uart_chainloader/src/bsp/raspberrypi/link.ld 08_timestamps/src/bsp/raspberrypi/link.ld
      {
          *(.text._start) *(.text*)
 @@ -33,14 +32,5 @@
-     __bss_end = .;
+     __bss_end_inclusive = . - 8;
  }
 -    .got :

@@ -307,7 +307,7 @@ diff -uNr 07_uart_chainloader/src/bsp/raspberrypi/memory.rs 08_timestamps/src/bsp/raspberrypi/memory.rs
 -    map::BOARD_DEFAULT_LOAD_ADDRESS
 -}
 -
- /// Return the range spanning the .bss section.
+ /// Return the inclusive range spanning the .bss section.
  ///
  /// # Safety

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -59,17 +59,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -59,17 +59,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -29,7 +29,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -4,16 +4,16 @@

 //! BSP Memory Management.

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -59,17 +59,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -4,26 +4,31 @@

 //! Memory Management.

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -152,8 +152,8 @@ type ArchTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
 ///
 /// # Safety
 ///
-/// - Supposed to land in `.bss`. Therefore, ensure that they boil down to all "0" entries.
-static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
+/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
+static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();
 ```
 They are populated using `bsp::memory::mmu::virt_mem_layout().virt_addr_properties()` and a bunch of
@@ -443,7 +443,7 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs
 +/// # Safety
 +///
 +/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
-+static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
++static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();
 +
 +static MMU: MemoryManagementUnit = MemoryManagementUnit;
 +
@@ -559,10 +559,10 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs
 +///
 +/// - Modifies a `static mut`. Ensure it only happens from here.
 +unsafe fn populate_tt_entries() -> Result<(), &'static str> {
-+    for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
-+        *l2_entry = TABLES.lvl3[l2_nr].base_addr_usize().into();
++    for (l2_nr, l2_entry) in KERNEL_TABLES.lvl2.iter_mut().enumerate() {
++        *l2_entry = KERNEL_TABLES.lvl3[l2_nr].base_addr_usize().into();
 +
-+        for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
++        for (l3_nr, l3_entry) in KERNEL_TABLES.lvl3[l2_nr].iter_mut().enumerate() {
 +            let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);
 +
 +            let (output_addr, attribute_fields) =
@@ -618,7 +618,7 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs
 +    populate_tt_entries()?;
 +
 +    // Set the "Translation Table Base Register".
-+    TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
++    TTBR0_EL1.set_baddr(KERNEL_TABLES.lvl2.base_addr_u64());
 +
 +    configure_translation_control();
 +
@@ -760,18 +760,18 @@ diff -uNr 10_privilege_level/src/bsp/raspberrypi/memory.rs 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory.rs
 +pub mod mmu;
 +
- use core::ops::Range;
+ use core::{cell::UnsafeCell, ops::RangeInclusive};

  //--------------------------------------------------------------------------------------------------
-@@ -12,6 +14,8 @@
-  // Symbols from the linker script.
-  extern "C" {
-+    static __ro_start: usize;
-+    static __ro_end: usize;
-     static __bss_start: usize;
-     static __bss_end: usize;
+@@ -14,6 +16,8 @@
+  extern "Rust" {
+      static __bss_start: UnsafeCell<u64>;
+      static __bss_end_inclusive: UnsafeCell<u64>;
++    static __ro_start: UnsafeCell<()>;
++    static __ro_end: UnsafeCell<()>;
  }

  //--------------------------------------------------------------------------------------------------
 @@ -23,6 +27,8 @@
  /// The board's memory map.
  #[rustfmt::skip]
@@ -808,7 +808,7 @@ diff -uNr 10_privilege_level/src/bsp/raspberrypi/memory.rs 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory.rs
 +/// - Value is provided by the linker script and must be trusted as-is.
 +#[inline(always)]
 +fn ro_start() -> usize {
-+    unsafe { &__ro_start as *const _ as usize }
++    unsafe { __ro_start.get() as usize }
 +}
 +
 +/// Size of the Read-Only (RO) range of the kernel binary.
@@ -818,7 +818,7 @@ diff -uNr 10_privilege_level/src/bsp/raspberrypi/memory.rs 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory.rs
 +/// - Value is provided by the linker script and must be trusted as-is.
 +#[inline(always)]
 +fn ro_end() -> usize {
-+    unsafe { &__ro_end as *const _ as usize }
++    unsafe { __ro_end.get() as usize }
 +}
 +
 +//--------------------------------------------------------------------------------------------------
@@ -1123,7 +1123,7 @@ diff -uNr 10_privilege_level/src/memory.rs 11_virtual_mem_part1_identity_mapping/src/memory.rs
 +pub mod mmu;
 +
- use core::ops::Range;
+ use core::ops::RangeInclusive;

  //--------------------------------------------------------------------------------------------------

@@ -138,7 +138,7 @@ struct MemoryManagementUnit;
 /// # Safety
 ///
 /// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
-static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
+static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();

 static MMU: MemoryManagementUnit = MemoryManagementUnit;
@@ -254,10 +254,10 @@ fn set_up_mair() {
 ///
 /// - Modifies a `static mut`. Ensure it only happens from here.
 unsafe fn populate_tt_entries() -> Result<(), &'static str> {
-    for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
-        *l2_entry = TABLES.lvl3[l2_nr].base_addr_usize().into();
+    for (l2_nr, l2_entry) in KERNEL_TABLES.lvl2.iter_mut().enumerate() {
+        *l2_entry = KERNEL_TABLES.lvl3[l2_nr].base_addr_usize().into();

-        for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
+        for (l3_nr, l3_entry) in KERNEL_TABLES.lvl3[l2_nr].iter_mut().enumerate() {
             let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);

             let (output_addr, attribute_fields) =
@@ -313,7 +313,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
     populate_tt_entries()?;

     // Set the "Translation Table Base Register".
-    TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
+    TTBR0_EL1.set_baddr(KERNEL_TABLES.lvl2.base_addr_u64());

     configure_translation_control();

@@ -32,7 +32,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -6,18 +6,18 @@
 pub mod mmu;

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __ro_start: usize;
-    static __ro_end: usize;
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
+    static __ro_start: UnsafeCell<()>;
+    static __ro_end: UnsafeCell<()>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -68,7 +68,7 @@ pub(super) mod map {
 /// - Value is provided by the linker script and must be trusted as-is.
 #[inline(always)]
 fn ro_start() -> usize {
-    unsafe { &__ro_start as *const _ as usize }
+    unsafe { __ro_start.get() as usize }
 }

 /// Size of the Read-Only (RO) range of the kernel binary.
@@ -78,7 +78,7 @@ fn ro_start() -> usize {
 /// - Value is provided by the linker script and must be trusted as-is.
 #[inline(always)]
 fn ro_end() -> usize {
-    unsafe { &__ro_end as *const _ as usize }
+    unsafe { __ro_end.get() as usize }
 }

 //--------------------------------------------------------------------------------------------------
@@ -91,17 +91,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -6,26 +6,31 @@
 pub mod mmu;

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -486,7 +486,7 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/exception.rs 12_exceptions_part1_groundwork/src/_arch/aarch64/exception.rs
  //! Architectural synchronous and asynchronous exception handling.

 -use cortex_a::regs::*;
-+use core::fmt;
++use core::{cell::UnsafeCell, fmt};
 +use cortex_a::{asm, barrier, regs::*};
 +use register::InMemoryRegister;
 +
@@ -713,7 +713,7 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/exception.rs 12_exceptions_part1_groundwork/src/_arch/aarch64/exception.rs
  //--------------------------------------------------------------------------------------------------
  // Public Code
-@@ -21,3 +244,24 @@
+@@ -21,3 +244,23 @@
          _ => (PrivilegeLevel::Unknown, "Unknown"),
      }
  }
@@ -728,12 +728,11 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/exception.rs 12_exceptions_part1_groundwork/src/_arch/aarch64/exception.rs
 +/// Manual.
 +pub unsafe fn handling_init() {
 +    // Provided by exception.S.
-+    extern "C" {
-+        static mut __exception_vector_start: u64;
++    extern "Rust" {
++        static __exception_vector_start: UnsafeCell<()>;
 +    }
-+    let addr: u64 = &__exception_vector_start as *const _ as u64;
 +
-+    VBAR_EL1.set(addr);
++    VBAR_EL1.set(__exception_vector_start.get() as u64);
 +
 +    // Force VBAR update to complete before next instruction.
 +    barrier::isb(barrier::SY);
@@ -4,7 +4,7 @@

 //! Architectural synchronous and asynchronous exception handling.

-use core::fmt;
+use core::{cell::UnsafeCell, fmt};
 use cortex_a::{asm, barrier, regs::*};
 use register::InMemoryRegister;
@@ -255,12 +255,11 @@ pub fn current_privilege_level() -> (PrivilegeLevel, &'static str) {
 /// Manual.
 pub unsafe fn handling_init() {
     // Provided by exception.S.
-    extern "C" {
-        static mut __exception_vector_start: u64;
+    extern "Rust" {
+        static __exception_vector_start: UnsafeCell<()>;
     }
-    let addr: u64 = &__exception_vector_start as *const _ as u64;

-    VBAR_EL1.set(addr);
+    VBAR_EL1.set(__exception_vector_start.get() as u64);

     // Force VBAR update to complete before next instruction.
     barrier::isb(barrier::SY);
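Note the second flavor of the same trick used here: for symbols where only the address is meaningful, the pointee is the zero-sized `()`. A sketch under that assumption, mirroring the hunk above:

```rust
use core::cell::UnsafeCell;

extern "Rust" {
    // Only the *address* of this symbol matters; the zero-sized `()`
    // pointee means no value can meaningfully be read through it.
    static __exception_vector_start: UnsafeCell<()>;
}

fn vector_base() -> u64 {
    // `.get()` returns `*mut ()`, which is cast to a plain address.
    unsafe { __exception_vector_start.get() as u64 }
}
```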

@@ -138,7 +138,7 @@ struct MemoryManagementUnit;
 /// # Safety
 ///
 /// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
-static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
+static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();

 static MMU: MemoryManagementUnit = MemoryManagementUnit;
@@ -254,10 +254,10 @@ fn set_up_mair() {
 ///
 /// - Modifies a `static mut`. Ensure it only happens from here.
 unsafe fn populate_tt_entries() -> Result<(), &'static str> {
-    for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
-        *l2_entry = TABLES.lvl3[l2_nr].base_addr_usize().into();
+    for (l2_nr, l2_entry) in KERNEL_TABLES.lvl2.iter_mut().enumerate() {
+        *l2_entry = KERNEL_TABLES.lvl3[l2_nr].base_addr_usize().into();

-        for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
+        for (l3_nr, l3_entry) in KERNEL_TABLES.lvl3[l2_nr].iter_mut().enumerate() {
             let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);

             let (output_addr, attribute_fields) =
@@ -313,7 +313,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
     populate_tt_entries()?;

     // Set the "Translation Table Base Register".
-    TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
+    TTBR0_EL1.set_baddr(KERNEL_TABLES.lvl2.base_addr_u64());

     configure_translation_control();

@@ -37,7 +37,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -6,18 +6,18 @@
 pub mod mmu;

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __ro_start: usize;
-    static __ro_end: usize;
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
+    static __ro_start: UnsafeCell<()>;
+    static __ro_end: UnsafeCell<()>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -68,7 +68,7 @@ pub(super) mod map {
 /// - Value is provided by the linker script and must be trusted as-is.
 #[inline(always)]
 fn ro_start() -> usize {
-    unsafe { &__ro_start as *const _ as usize }
+    unsafe { __ro_start.get() as usize }
 }

 /// Size of the Read-Only (RO) range of the kernel binary.
@@ -78,7 +78,7 @@ fn ro_start() -> usize {
 /// - Value is provided by the linker script and must be trusted as-is.
 #[inline(always)]
 fn ro_end() -> usize {
-    unsafe { &__ro_end as *const _ as usize }
+    unsafe { __ro_end.get() as usize }
 }

 //--------------------------------------------------------------------------------------------------
@@ -91,17 +91,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@@ -6,26 +6,31 @@
 pub mod mmu;

-use core::ops::Range;
+use core::ops::RangeInclusive;

 //--------------------------------------------------------------------------------------------------
 // Public Code
 //--------------------------------------------------------------------------------------------------

-/// Zero out a memory range.
+/// Zero out an inclusive memory range.
 ///
 /// # Safety
 ///
 /// - `range.start` and `range.end` must be valid.
 /// - `range.start` and `range.end` must be `T` aligned.
-pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
+pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
 where
     T: From<u8>,
 {
-    let mut ptr = range.start;
+    let mut ptr = *range.start();
+    let end_inclusive = *range.end();

-    while ptr < range.end {
+    loop {
         core::ptr::write_volatile(ptr, T::from(0));
         ptr = ptr.offset(1);
+
+        if ptr > end_inclusive {
+            break;
+        }
     }
 }

@@ -17,7 +17,7 @@ use crate::{bsp, memory};
 /// - Must only be called pre `kernel_init()`.
 #[inline(always)]
 unsafe fn zero_bss() {
-    memory::zero_volatile(bsp::memory::bss_range());
+    memory::zero_volatile(bsp::memory::bss_range_inclusive());
 }

 //--------------------------------------------------------------------------------------------------

@@ -720,14 +720,17 @@ RUSTFLAGS="-C link-arg=-Tsrc/bsp/raspberrypi/link.ld -C target-cpu=cortex-a53 -D
     Finished release [optimized] target(s) in 0.01s
      Running target/aarch64-unknown-none-softfloat/release/deps/libkernel-4cc6412ddf631982
  -------------------------------------------------------------------
-🦀 Running 5 tests
+🦀 Running 8 tests
  -------------------------------------------------------------------
-1. bss_section_is_sane.......................................[ok]
-2. virt_mem_layout_sections_are_64KiB_aligned................[ok]
-3. virt_mem_layout_has_no_overlaps...........................[ok]
-4. test_runner_executes_in_kernel_mode.......................[ok]
-5. zero_volatile_works.......................................[ok]
+1. virt_mem_layout_sections_are_64KiB_aligned................[ok]
+2. virt_mem_layout_has_no_overlaps...........................[ok]
+3. test_runner_executes_in_kernel_mode.......................[ok]
+4. size_of_tabledescriptor_equals_64_bit.....................[ok]
+5. size_of_pagedescriptor_equals_64_bit......................[ok]
+6. kernel_tables_in_bss......................................[ok]
+7. zero_volatile_works.......................................[ok]
+8. bss_section_is_sane.......................................[ok]
  -------------------------------------------------------------------
  ✅ Success: libkernel
@@ -974,7 +977,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/exception.rs 13_integrated_testing/src/_arch/aarch64/exception.rs
 @@ -5,7 +5,7 @@
  //! Architectural synchronous and asynchronous exception handling.

- use core::fmt;
+ use core::{cell::UnsafeCell, fmt};
 -use cortex_a::{asm, barrier, regs::*};
 +use cortex_a::{barrier, regs::*};
  use register::InMemoryRegister;
@@ -998,6 +1001,51 @@ diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/exception.rs 13_integrated_testing/src/_arch/aarch64/exception.rs
  }
 diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs 13_integrated_testing/src/_arch/aarch64/memory/mmu.rs
 --- 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs
 +++ 13_integrated_testing/src/_arch/aarch64/memory/mmu.rs
+@@ -331,3 +331,40 @@
+         Ok(())
+     }
+ }
++
++//--------------------------------------------------------------------------------------------------
++// Testing
++//--------------------------------------------------------------------------------------------------
++
++#[cfg(test)]
++mod tests {
++    use super::*;
++    use test_macros::kernel_test;
++
++    /// Check if the size of `struct TableDescriptor` is as expected.
++    #[kernel_test]
++    fn size_of_tabledescriptor_equals_64_bit() {
++        assert_eq!(
++            core::mem::size_of::<TableDescriptor>(),
++            core::mem::size_of::<u64>()
++        );
++    }
++
++    /// Check if the size of `struct PageDescriptor` is as expected.
++    #[kernel_test]
++    fn size_of_pagedescriptor_equals_64_bit() {
++        assert_eq!(
++            core::mem::size_of::<PageDescriptor>(),
++            core::mem::size_of::<u64>()
++        );
++    }
++
++    /// Check if KERNEL_TABLES is in .bss.
++    #[kernel_test]
++    fn kernel_tables_in_bss() {
++        let bss_range = bsp::memory::bss_range_inclusive();
++        let kernel_tables_addr = unsafe { &KERNEL_TABLES as *const _ as usize as *mut u64 };
++
++        assert!(bss_range.contains(&kernel_tables_addr));
++    }
++}
 diff -uNr 12_exceptions_part1_groundwork/src/bsp/raspberrypi/console.rs 13_integrated_testing/src/bsp/raspberrypi/console.rs
 --- 12_exceptions_part1_groundwork/src/bsp/raspberrypi/console.rs
 +++ 13_integrated_testing/src/bsp/raspberrypi/console.rs
@@ -1478,8 +1526,8 @@ diff -uNr 12_exceptions_part1_groundwork/src/memory/mmu.rs 13_integrated_testing/src/memory/mmu.rs
 diff -uNr 12_exceptions_part1_groundwork/src/memory.rs 13_integrated_testing/src/memory.rs
 --- 12_exceptions_part1_groundwork/src/memory.rs
 +++ 13_integrated_testing/src/memory.rs
-@@ -29,3 +29,24 @@
-         ptr = ptr.offset(1);
+@@ -34,3 +34,40 @@
+     }
  }
  }
 +
@@ -1497,11 +1545,27 @@ diff -uNr 12_exceptions_part1_groundwork/src/memory.rs 13_integrated_testing/src/memory.rs
 +    fn zero_volatile_works() {
 +        let mut x: [usize; 3] = [10, 11, 12];
 +        let x_range = x.as_mut_ptr_range();
++        let x_range_inclusive =
++            RangeInclusive::new(x_range.start, unsafe { x_range.end.offset(-1) });
 +
-+        unsafe { zero_volatile(x_range) };
++        unsafe { zero_volatile(x_range_inclusive) };
 +
 +        assert_eq!(x, [0, 0, 0]);
 +    }
++
++    /// Check `bss` section layout.
++    #[kernel_test]
++    fn bss_section_is_sane() {
++        use crate::bsp::memory::bss_range_inclusive;
++        use core::mem;
++
++        let start = *bss_range_inclusive().start() as usize;
++        let end = *bss_range_inclusive().end() as usize;
++
++        assert_eq!(start % mem::size_of::<usize>(), 0);
++        assert_eq!(end % mem::size_of::<usize>(), 0);
++        assert!(end >= start);
++    }
 +}
 diff -uNr 12_exceptions_part1_groundwork/src/panic_wait.rs 13_integrated_testing/src/panic_wait.rs
diff -uNr 12_exceptions_part1_groundwork/src/runtime_init.rs 13_integrated_testing/src/runtime_init.rs
--- 12_exceptions_part1_groundwork/src/runtime_init.rs
+++ 13_integrated_testing/src/runtime_init.rs
@@ -31,7 +31,33 @@
@@ -31,7 +31,10 @@
///
/// - Only a single core must be active and running this function.
pub unsafe fn runtime_init() -> ! {
- zero_bss();
+ extern "Rust" {
+ fn kernel_init() -> !;
+ }
+
zero_bss();
+ kernel_init()
+}
+
+//--------------------------------------------------------------------------------------------------
+// Testing
+//--------------------------------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use test_macros::kernel_test;
+
+ /// Check `bss` section layout.
+ #[kernel_test]
+ fn bss_section_is_sane() {
+ use core::mem;
+
+ let start = bsp::memory::bss_range().start as *const _ as usize;
+ let end = bsp::memory::bss_range().end as *const _ as usize;
- crate::kernel_init()
+ assert_eq!(start modulo mem::size_of::<usize>(), 0);
+ assert_eq!(end modulo mem::size_of::<usize>(), 0);
+ assert!(end >= start);
+ }
+ zero_bss();
+ kernel_init()
}
diff -uNr 12_exceptions_part1_groundwork/test-macros/Cargo.toml 13_integrated_testing/test-macros/Cargo.toml

@@ -4,7 +4,7 @@

 //! Architectural synchronous and asynchronous exception handling.

-use core::fmt;
+use core::{cell::UnsafeCell, fmt};
 use cortex_a::{barrier, regs::*};
 use register::InMemoryRegister;
@@ -245,12 +245,11 @@ pub fn current_privilege_level() -> (PrivilegeLevel, &'static str) {
 /// Manual.
 pub unsafe fn handling_init() {
     // Provided by exception.S.
-    extern "C" {
-        static mut __exception_vector_start: u64;
+    extern "Rust" {
+        static __exception_vector_start: UnsafeCell<()>;
     }
-    let addr: u64 = &__exception_vector_start as *const _ as u64;

-    VBAR_EL1.set(addr);
+    VBAR_EL1.set(__exception_vector_start.get() as u64);

     // Force VBAR update to complete before next instruction.
     barrier::isb(barrier::SY);

@@ -138,7 +138,7 @@ struct MemoryManagementUnit;
 /// # Safety
 ///
 /// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
-static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
+static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();

 static MMU: MemoryManagementUnit = MemoryManagementUnit;
@@ -254,10 +254,10 @@ fn set_up_mair() {
 ///
 /// - Modifies a `static mut`. Ensure it only happens from here.
 unsafe fn populate_tt_entries() -> Result<(), &'static str> {
-    for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
-        *l2_entry = TABLES.lvl3[l2_nr].base_addr_usize().into();
+    for (l2_nr, l2_entry) in KERNEL_TABLES.lvl2.iter_mut().enumerate() {
+        *l2_entry = KERNEL_TABLES.lvl3[l2_nr].base_addr_usize().into();

-        for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
+        for (l3_nr, l3_entry) in KERNEL_TABLES.lvl3[l2_nr].iter_mut().enumerate() {
             let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);

             let (output_addr, attribute_fields) =
@@ -313,7 +313,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
     populate_tt_entries()?;

     // Set the "Translation Table Base Register".
-    TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
+    TTBR0_EL1.set_baddr(KERNEL_TABLES.lvl2.base_addr_u64());

     configure_translation_control();
@@ -331,3 +331,40 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
         Ok(())
     }
 }
+
+//--------------------------------------------------------------------------------------------------
+// Testing
+//--------------------------------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use test_macros::kernel_test;
+
+    /// Check if the size of `struct TableDescriptor` is as expected.
+    #[kernel_test]
+    fn size_of_tabledescriptor_equals_64_bit() {
+        assert_eq!(
+            core::mem::size_of::<TableDescriptor>(),
+            core::mem::size_of::<u64>()
+        );
+    }
+
+    /// Check if the size of `struct PageDescriptor` is as expected.
+    #[kernel_test]
+    fn size_of_pagedescriptor_equals_64_bit() {
+        assert_eq!(
+            core::mem::size_of::<PageDescriptor>(),
+            core::mem::size_of::<u64>()
+        );
+    }
+
+    /// Check if KERNEL_TABLES is in .bss.
+    #[kernel_test]
+    fn kernel_tables_in_bss() {
+        let bss_range = bsp::memory::bss_range_inclusive();
+        let kernel_tables_addr = unsafe { &KERNEL_TABLES as *const _ as usize as *mut u64 };
+
+        assert!(bss_range.contains(&kernel_tables_addr));
+    }
+}

@@ -37,7 +37,7 @@ SECTIONS
     __bss_start = .;
     *(.bss*);
     . = ALIGN(8);
-    __bss_end = .;
+    __bss_end_inclusive = . - 8;
 }

 /DISCARD/ : { *(.comment*) }

@@ -6,18 +6,18 @@
 pub mod mmu;

-use core::ops::Range;
+use core::{cell::UnsafeCell, ops::RangeInclusive};

 //--------------------------------------------------------------------------------------------------
 // Private Definitions
 //--------------------------------------------------------------------------------------------------

 // Symbols from the linker script.
-extern "C" {
-    static __ro_start: usize;
-    static __ro_end: usize;
-    static __bss_start: usize;
-    static __bss_end: usize;
+extern "Rust" {
+    static __bss_start: UnsafeCell<u64>;
+    static __bss_end_inclusive: UnsafeCell<u64>;
+    static __ro_start: UnsafeCell<()>;
+    static __ro_end: UnsafeCell<()>;
 }

 //--------------------------------------------------------------------------------------------------
@@ -68,7 +68,7 @@ pub(super) mod map {
 /// - Value is provided by the linker script and must be trusted as-is.
 #[inline(always)]
 fn ro_start() -> usize {
-    unsafe { &__ro_start as *const _ as usize }
+    unsafe { __ro_start.get() as usize }
 }

 /// Size of the Read-Only (RO) range of the kernel binary.
@@ -78,7 +78,7 @@ fn ro_start() -> usize {
 /// - Value is provided by the linker script and must be trusted as-is.
 #[inline(always)]
 fn ro_end() -> usize {
-    unsafe { &__ro_end as *const _ as usize }
+    unsafe { __ro_end.get() as usize }
 }

 //--------------------------------------------------------------------------------------------------
@@ -91,17 +91,12 @@ pub fn boot_core_stack_end() -> usize {
     map::BOOT_CORE_STACK_END
 }

-/// Return the range spanning the .bss section.
+/// Return the inclusive range spanning the .bss section.
 ///
 /// # Safety
 ///
-/// - Values are provided by the linker script and must be trusted as-is.
-pub fn bss_range() -> Range<*mut u64> {
-    unsafe {
-        Range {
-            start: &__bss_start as *const _ as *mut u64,
-            end: &__bss_end as *const _ as *mut u64,
-        }
-    }
+/// - The linker-provided addresses must be u64 aligned.
+pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
+    unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
 }

@ -6,27 +6,32 @@
pub mod mmu;
use core::ops::Range;
use core::ops::RangeInclusive;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Zero out a memory range.
/// Zero out an inclusive memory range.
///
/// # Safety
///
/// - `range.start` and `range.end` must be valid.
/// - `range.start` and `range.end` must be `T` aligned.
pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
where
T: From<u8>,
{
let mut ptr = range.start;
let mut ptr = *range.start();
let end_inclusive = *range.end();
while ptr < range.end {
loop {
core::ptr::write_volatile(ptr, T::from(0));
ptr = ptr.offset(1);
if ptr > end_inclusive {
break;
}
}
}
@ -44,9 +49,25 @@ mod tests {
fn zero_volatile_works() {
let mut x: [usize; 3] = [10, 11, 12];
let x_range = x.as_mut_ptr_range();
let x_range_inclusive =
RangeInclusive::new(x_range.start, unsafe { x_range.end.offset(-1) });
unsafe { zero_volatile(x_range) };
unsafe { zero_volatile(x_range_inclusive) };
assert_eq!(x, [0, 0, 0]);
}
/// Check `bss` section layout.
#[kernel_test]
fn bss_section_is_sane() {
use crate::bsp::memory::bss_range_inclusive;
use core::mem;
let start = *bss_range_inclusive().start() as usize;
let end = *bss_range_inclusive().end() as usize;
assert_eq!(start % mem::size_of::<usize>(), 0);
assert_eq!(end % mem::size_of::<usize>(), 0);
assert!(end >= start);
}
}
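The new do-while shape of `zero_volatile` is worth lifting into a tiny host program: it writes at least once before checking the bound, so callers must pass a non-empty range, which holds for the kernel's `.bss`. A sketch (the array and `main` are illustrative only):

```rust
use core::ops::RangeInclusive;

/// Host-side copy of the inclusive zeroing loop shown in the diff.
unsafe fn zero_volatile<T: From<u8>>(range: RangeInclusive<*mut T>) {
    let mut ptr = *range.start();
    let end_inclusive = *range.end();

    loop {
        core::ptr::write_volatile(ptr, T::from(0));
        ptr = ptr.offset(1);

        if ptr > end_inclusive {
            break;
        }
    }
}

fn main() {
    let mut x: [u64; 3] = [10, 11, 12];

    // Inclusive end: a pointer to the *last* element,
    // not one-past-the-end.
    let start = x.as_mut_ptr();
    let end = unsafe { start.add(2) };

    unsafe { zero_volatile(RangeInclusive::new(start, end)) };
    assert_eq!(x, [0, 0, 0]);
}
```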

@ -17,7 +17,7 @@ use crate::{bsp, memory};
/// - Must only be called pre `kernel_init()`.
#[inline(always)]
unsafe fn zero_bss() {
memory::zero_volatile(bsp::memory::bss_range());
memory::zero_volatile(bsp::memory::bss_range_inclusive());
}
//--------------------------------------------------------------------------------------------------
@ -38,26 +38,3 @@ pub unsafe fn runtime_init() -> ! {
zero_bss();
kernel_init()
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Check `bss` section layout.
#[kernel_test]
fn bss_section_is_sane() {
use core::mem;
let start = bsp::memory::bss_range().start as *const _ as usize;
let end = bsp::memory::bss_range().end as *const _ as usize;
assert_eq!(start % mem::size_of::<usize>(), 0);
assert_eq!(end % mem::size_of::<usize>(), 0);
assert!(end >= start);
}
}

@ -853,7 +853,7 @@ diff -uNr 13_integrated_testing/src/_arch/aarch64/exception.rs 14_exceptions_par
//! Architectural synchronous and asynchronous exception handling.
+use crate::{bsp, exception};
use core::fmt;
use core::{cell::UnsafeCell, fmt};
use cortex_a::{barrier, regs::*};
use register::InMemoryRegister;
@@ -84,8 +85,11 @@

@ -5,7 +5,7 @@
//! Architectural synchronous and asynchronous exception handling.
use crate::{bsp, exception};
use core::fmt;
use core::{cell::UnsafeCell, fmt};
use cortex_a::{barrier, regs::*};
use register::InMemoryRegister;
@ -249,12 +249,11 @@ pub fn current_privilege_level() -> (PrivilegeLevel, &'static str) {
/// Manual.
pub unsafe fn handling_init() {
// Provided by exception.S.
extern "C" {
static mut __exception_vector_start: u64;
extern "Rust" {
static __exception_vector_start: UnsafeCell<()>;
}
let addr: u64 = &__exception_vector_start as *const _ as u64;
VBAR_EL1.set(addr);
VBAR_EL1.set(__exception_vector_start.get() as u64);
// Force VBAR update to complete before next instruction.
barrier::isb(barrier::SY);
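Note the pointee type here: the exception-vector symbol is declared as `UnsafeCell<()>`, not `UnsafeCell<u64>`, because Rust only ever needs its address to program `VBAR_EL1` and never accesses what is behind it. A sketch of just that pattern (`vector_base` is a hypothetical helper name; the register write is elided):

```rust
use core::cell::UnsafeCell;

extern "Rust" {
    // Defined in exception.S. Only the address matters, so the
    // zero-sized `()` pointee claims nothing about the memory behind it.
    static __exception_vector_start: UnsafeCell<()>;
}

/// Hypothetical helper: the value that would be written to VBAR_EL1.
fn vector_base() -> u64 {
    // SAFETY: address is provided by the linker/assembly and trusted
    // as-is; the pointee is never dereferenced.
    unsafe { __exception_vector_start.get() as u64 }
}
```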

@ -138,7 +138,7 @@ struct MemoryManagementUnit;
/// # Safety
///
/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();
static MMU: MemoryManagementUnit = MemoryManagementUnit;
@ -254,10 +254,10 @@ fn set_up_mair() {
///
/// - Modifies a `static mut`. Ensure it only happens from here.
unsafe fn populate_tt_entries() -> Result<(), &'static str> {
for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
*l2_entry = TABLES.lvl3[l2_nr].base_addr_usize().into();
for (l2_nr, l2_entry) in KERNEL_TABLES.lvl2.iter_mut().enumerate() {
*l2_entry = KERNEL_TABLES.lvl3[l2_nr].base_addr_usize().into();
for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
for (l3_nr, l3_entry) in KERNEL_TABLES.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);
let (output_addr, attribute_fields) =
@ -313,7 +313,7 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
populate_tt_entries()?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
TTBR0_EL1.set_baddr(KERNEL_TABLES.lvl2.base_addr_u64());
configure_translation_control();
@ -331,3 +331,40 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
Ok(())
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Check if the size of `struct TableDescriptor` is as expected.
#[kernel_test]
fn size_of_tabledescriptor_equals_64_bit() {
assert_eq!(
core::mem::size_of::<TableDescriptor>(),
core::mem::size_of::<u64>()
);
}
/// Check if the size of `struct PageDescriptor` is as expected.
#[kernel_test]
fn size_of_pagedescriptor_equals_64_bit() {
assert_eq!(
core::mem::size_of::<PageDescriptor>(),
core::mem::size_of::<u64>()
);
}
/// Check if KERNEL_TABLES is in .bss.
#[kernel_test]
fn kernel_tables_in_bss() {
let bss_range = bsp::memory::bss_range_inclusive();
let kernel_tables_addr = unsafe { &KERNEL_TABLES as *const _ as usize as *mut u64 };
assert!(bss_range.contains(&kernel_tables_addr));
}
}
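The test relies on `RangeInclusive::contains`, which for raw pointers is just two pointer comparisons, and, unlike the old half-open `Range`, it reports an object occupying the very last `u64` of `.bss` as inside. A minimal host sketch of that check:

```rust
use core::ops::RangeInclusive;

fn main() {
    // Stand-in for .bss: four u64 words (illustrative only).
    let mut bss_model: [u64; 4] = [0; 4];

    let start = bss_model.as_mut_ptr();
    let last_word = unsafe { start.add(3) }; // last word, not one-past-the-end
    let past_end = unsafe { start.add(4) };

    let bss_range = RangeInclusive::new(start, last_word);

    // The last word is inside the inclusive range...
    assert!(bss_range.contains(&last_word));
    // ...while one-past-the-end is not.
    assert!(!bss_range.contains(&past_end));
}
```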

@ -37,7 +37,7 @@ SECTIONS
__bss_start = .;
*(.bss*);
. = ALIGN(8);
__bss_end = .;
__bss_end_inclusive = . - 8;
}
/DISCARD/ : { *(.comment*) }

@ -6,18 +6,18 @@
pub mod mmu;
use core::ops::Range;
use core::{cell::UnsafeCell, ops::RangeInclusive};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbols from the linker script.
extern "C" {
static __ro_start: usize;
static __ro_end: usize;
static __bss_start: usize;
static __bss_end: usize;
extern "Rust" {
static __bss_start: UnsafeCell<u64>;
static __bss_end_inclusive: UnsafeCell<u64>;
static __ro_start: UnsafeCell<()>;
static __ro_end: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -72,7 +72,7 @@ pub(super) mod map {
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn ro_start() -> usize {
unsafe { &__ro_start as *const _ as usize }
unsafe { __ro_start.get() as usize }
}
/// Size of the Read-Only (RO) range of the kernel binary.
@ -82,7 +82,7 @@ fn ro_start() -> usize {
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn ro_end() -> usize {
unsafe { &__ro_end as *const _ as usize }
unsafe { __ro_end.get() as usize }
}
//--------------------------------------------------------------------------------------------------
@ -95,17 +95,12 @@ pub fn boot_core_stack_end() -> usize {
map::BOOT_CORE_STACK_END
}
/// Return the range spanning the .bss section.
/// Return the inclusive range spanning the .bss section.
///
/// # Safety
///
/// - Values are provided by the linker script and must be trusted as-is.
/// - The linker-provided addresses must be u64 aligned.
pub fn bss_range() -> Range<*mut u64> {
unsafe {
Range {
start: &__bss_start as *const _ as *mut u64,
end: &__bss_end as *const _ as *mut u64,
}
}
pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
}

@ -6,27 +6,32 @@
pub mod mmu;
use core::ops::Range;
use core::ops::RangeInclusive;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Zero out a memory range.
/// Zero out an inclusive memory range.
///
/// # Safety
///
/// - `range.start` and `range.end` must be valid.
/// - `range.start` and `range.end` must be `T` aligned.
pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
where
T: From<u8>,
{
let mut ptr = range.start;
let mut ptr = *range.start();
let end_inclusive = *range.end();
while ptr < range.end {
loop {
core::ptr::write_volatile(ptr, T::from(0));
ptr = ptr.offset(1);
if ptr > end_inclusive {
break;
}
}
}
@ -44,9 +49,25 @@ mod tests {
fn zero_volatile_works() {
let mut x: [usize; 3] = [10, 11, 12];
let x_range = x.as_mut_ptr_range();
let x_range_inclusive =
RangeInclusive::new(x_range.start, unsafe { x_range.end.offset(-1) });
unsafe { zero_volatile(x_range) };
unsafe { zero_volatile(x_range_inclusive) };
assert_eq!(x, [0, 0, 0]);
}
/// Check `bss` section layout.
#[kernel_test]
fn bss_section_is_sane() {
use crate::bsp::memory::bss_range_inclusive;
use core::mem;
let start = *bss_range_inclusive().start() as usize;
let end = *bss_range_inclusive().end() as usize;
assert_eq!(start % mem::size_of::<usize>(), 0);
assert_eq!(end % mem::size_of::<usize>(), 0);
assert!(end >= start);
}
}

@ -17,7 +17,7 @@ use crate::{bsp, memory};
/// - Must only be called pre `kernel_init()`.
#[inline(always)]
unsafe fn zero_bss() {
memory::zero_volatile(bsp::memory::bss_range());
memory::zero_volatile(bsp::memory::bss_range_inclusive());
}
//--------------------------------------------------------------------------------------------------
@ -38,26 +38,3 @@ pub unsafe fn runtime_init() -> ! {
zero_bss();
kernel_init()
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Check `bss` section layout.
#[kernel_test]
fn bss_section_is_sane() {
use core::mem;
let start = bsp::memory::bss_range().start as *const _ as usize;
let end = bsp::memory::bss_range().end as *const _ as usize;
assert_eq!(start % mem::size_of::<usize>(), 0);
assert_eq!(end % mem::size_of::<usize>(), 0);
assert!(end >= start);
}
}

@ -452,7 +452,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs 15
/// # Safety
///
/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
-static mut TABLES: ArchTranslationTable = ArchTranslationTable::new();
-static mut KERNEL_TABLES: ArchTranslationTable = ArchTranslationTable::new();
+static KERNEL_TABLES: InitStateLock<ArchTranslationTable> =
+ InitStateLock::new(ArchTranslationTable::new());
@ -591,10 +591,10 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs 15
-///
-/// - Modifies a `static mut`. Ensure it only happens from here.
-unsafe fn populate_tt_entries() -> Result<(), &'static str> {
- for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
- *l2_entry = TABLES.lvl3[l2_nr].base_addr_usize().into();
- for (l2_nr, l2_entry) in KERNEL_TABLES.lvl2.iter_mut().enumerate() {
- *l2_entry = KERNEL_TABLES.lvl3[l2_nr].base_addr_usize().into();
-
- for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
- for (l3_nr, l3_entry) in KERNEL_TABLES.lvl3[l2_nr].iter_mut().enumerate() {
- let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);
-
- let (output_addr, attribute_fields) =
@ -678,14 +678,14 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs 15
+ if p.len() != v.len() {
+ return Err("Tried to map page slices with unequal sizes");
+ }
+
-impl memory::mmu::interface::MMU for MemoryManagementUnit {
- unsafe fn init(&self) -> Result<(), &'static str> {
+ // No work to do for empty slices.
+ if p.is_empty() {
+ return Ok(());
+ }
+
+ if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page() {
+ return Err("Tried to map outside of physical address space");
+ }
@ -757,55 +757,30 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs 15
- populate_tt_entries()?;
-
// Set the "Translation Table Base Register".
- TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
- TTBR0_EL1.set_baddr(KERNEL_TABLES.lvl2.base_addr_u64());
+ TTBR0_EL1.set_baddr(phys_kernel_table_base_addr.into_usize() as u64);
configure_translation_control();
@@ -331,3 +511,43 @@
Ok(())
}
}
+
+//--------------------------------------------------------------------------------------------------
+// Testing
+//--------------------------------------------------------------------------------------------------
+
+#[cfg(test)]
@@ -337,6 +517,9 @@
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
+pub(in crate::memory::mmu) type MinSizeArchTranslationTable = FixedSizeTranslationTable<1>;
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use test_macros::kernel_test;
+
+ /// Check if the size of `struct TableDescriptor` is as expected.
+ #[kernel_test]
+ fn size_of_tabledescriptor_equals_64_bit() {
+ assert_eq!(
+ core::mem::size_of::<TableDescriptor>(),
+ core::mem::size_of::<u64>()
+ );
+ }
+
+ /// Check if the size of `struct PageDescriptor` is as expected.
+ #[kernel_test]
+ fn size_of_pagedescriptor_equals_64_bit() {
+ assert_eq!(
+ core::mem::size_of::<PageDescriptor>(),
+ core::mem::size_of::<u64>()
+ );
+ }
+
+ /// Check if KERNEL_TABLES is in .bss.
+ #[kernel_test]
+ fn kernel_tables_in_bss() {
+ let bss_range = bsp::memory::bss_range();
mod tests {
use super::*;
use test_macros::kernel_test;
@@ -363,7 +546,7 @@
#[kernel_test]
fn kernel_tables_in_bss() {
let bss_range = bsp::memory::bss_range_inclusive();
- let kernel_tables_addr = unsafe { &KERNEL_TABLES as *const _ as usize as *mut u64 };
+ let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64;
+
+ assert!(bss_range.contains(&kernel_tables_addr));
+ }
+}
assert!(bss_range.contains(&kernel_tables_addr));
}
diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gicc.rs 15_virtual_mem_part2_mmio_remap/src/bsp/device_driver/arm/gicv2/gicc.rs
--- 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gicc.rs
@ -1467,7 +1442,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/link.ld 15_vir
+++ 15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/link.ld
@@ -39,6 +39,11 @@
. = ALIGN(8);
__bss_end = .;
__bss_end_inclusive = . - 8;
}
+ . = ALIGN(65536);
+ __data_end = .;
@ -1749,18 +1724,16 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs 15_v
pub mod mmu;
+use crate::memory::mmu::{Address, Physical, Virtual};
use core::ops::Range;
use core::{cell::UnsafeCell, ops::RangeInclusive};
//--------------------------------------------------------------------------------------------------
@@ -15,36 +47,41 @@
// Symbols from the linker script.
extern "C" {
static __ro_start: usize;
- static __ro_end: usize;
+ static __ro_size: usize;
static __bss_start: usize;
static __bss_end: usize;
+ static __data_size: usize;
@@ -17,34 +49,39 @@
static __bss_start: UnsafeCell<u64>;
static __bss_end_inclusive: UnsafeCell<u64>;
static __ro_start: UnsafeCell<()>;
- static __ro_end: UnsafeCell<()>;
+ static __ro_size: UnsafeCell<()>;
+ static __data_size: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -1841,9 +1814,9 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs 15_v
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
-fn ro_start() -> usize {
- unsafe { &__ro_start as *const _ as usize }
- unsafe { __ro_start.get() as usize }
+fn virt_ro_start() -> Address<Virtual> {
+ Address::new(unsafe { &__ro_start as *const _ as usize })
+ Address::new(unsafe { __ro_start.get() as usize })
}
/// Size of the Read-Only (RO) range of the kernel binary.
@ -1852,9 +1825,9 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs 15_v
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
-fn ro_end() -> usize {
- unsafe { &__ro_end as *const _ as usize }
- unsafe { __ro_end.get() as usize }
+fn ro_size() -> usize {
+ unsafe { &__ro_size as *const _ as usize }
+ unsafe { __ro_size.get() as usize }
+}
+
+/// Start address of the data range.
@ -1870,7 +1843,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs 15_v
+/// - Value is provided by the linker script and must be trusted as-is.
+#[inline(always)]
+fn data_size() -> usize {
+ unsafe { &__data_size as *const _ as usize }
+ unsafe { __data_size.get() as usize }
+}
+
+/// Start address of the boot core's stack.
@ -1904,7 +1877,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs 15_v
+ Address::new(end)
}
/// Return the range spanning the .bss section.
/// Return the inclusive range spanning the .bss section.
diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi.rs 15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi.rs
--- 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi.rs

@ -5,7 +5,7 @@
//! Architectural synchronous and asynchronous exception handling.
use crate::{bsp, exception};
use core::fmt;
use core::{cell::UnsafeCell, fmt};
use cortex_a::{barrier, regs::*};
use register::InMemoryRegister;
@ -249,12 +249,11 @@ pub fn current_privilege_level() -> (PrivilegeLevel, &'static str) {
/// Manual.
pub unsafe fn handling_init() {
// Provided by exception.S.
extern "C" {
static mut __exception_vector_start: u64;
extern "Rust" {
static __exception_vector_start: UnsafeCell<()>;
}
let addr: u64 = &__exception_vector_start as *const _ as u64;
VBAR_EL1.set(addr);
VBAR_EL1.set(__exception_vector_start.get() as u64);
// Force VBAR update to complete before next instruction.
barrier::isb(barrier::SY);

@ -545,7 +545,7 @@ mod tests {
/// Check if KERNEL_TABLES is in .bss.
#[kernel_test]
fn kernel_tables_in_bss() {
let bss_range = bsp::memory::bss_range();
let bss_range = bsp::memory::bss_range_inclusive();
let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64;
assert!(bss_range.contains(&kernel_tables_addr));

@ -37,7 +37,7 @@ SECTIONS
__bss_start = .;
*(.bss*);
. = ALIGN(8);
__bss_end = .;
__bss_end_inclusive = . - 8;
}
. = ALIGN(65536);
__data_end = .;
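Chapter 15's script additionally rounds up to the 64 KiB translation granule before exporting `__data_end`. `ALIGN(65536)` performs the usual align-up operation; a sketch with made-up addresses:

```rust
// 64 KiB: the page size used by the tutorial's MMU setup.
const GRANULE: usize = 64 * 1024;

/// Round `addr` up to the next `align` boundary (align-up formula,
/// equivalent to the linker's ALIGN()).
fn align_up(addr: usize, align: usize) -> usize {
    assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

fn main() {
    // Hypothetical addresses, for illustration only.
    assert_eq!(align_up(0x8_0001, GRANULE), 0x9_0000);
    assert_eq!(align_up(0x9_0000, GRANULE), 0x9_0000); // already aligned
}
```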

@ -38,19 +38,19 @@
pub mod mmu;
use crate::memory::mmu::{Address, Physical, Virtual};
use core::ops::Range;
use core::{cell::UnsafeCell, ops::RangeInclusive};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbols from the linker script.
extern "C" {
static __ro_start: usize;
static __ro_size: usize;
static __bss_start: usize;
static __bss_end: usize;
static __data_size: usize;
extern "Rust" {
static __bss_start: UnsafeCell<u64>;
static __bss_end_inclusive: UnsafeCell<u64>;
static __ro_start: UnsafeCell<()>;
static __ro_size: UnsafeCell<()>;
static __data_size: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
@ -118,7 +118,7 @@ pub(super) mod map {
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_ro_start() -> Address<Virtual> {
Address::new(unsafe { &__ro_start as *const _ as usize })
Address::new(unsafe { __ro_start.get() as usize })
}
/// Size of the Read-Only (RO) range of the kernel binary.
@ -128,7 +128,7 @@ fn virt_ro_start() -> Address<Virtual> {
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn ro_size() -> usize {
unsafe { &__ro_size as *const _ as usize }
unsafe { __ro_size.get() as usize }
}
/// Start address of the data range.
@ -144,7 +144,7 @@ fn virt_data_start() -> Address<Virtual> {
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn data_size() -> usize {
unsafe { &__data_size as *const _ as usize }
unsafe { __data_size.get() as usize }
}
/// Start address of the boot core's stack.
@ -177,17 +177,12 @@ pub fn phys_boot_core_stack_end() -> Address<Physical> {
Address::new(end)
}
/// Return the range spanning the .bss section.
/// Return the inclusive range spanning the .bss section.
///
/// # Safety
///
/// - Values are provided by the linker script and must be trusted as-is.
/// - The linker-provided addresses must be u64 aligned.
pub fn bss_range() -> Range<*mut u64> {
unsafe {
Range {
start: &__bss_start as *const _ as *mut u64,
end: &__bss_end as *const _ as *mut u64,
}
}
pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
}
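This final form mixes three flavors of linker symbols: `__bss_start`/`__bss_end_inclusive` point at real `u64` storage, `__ro_start` is address-only, and `__ro_size`/`__data_size` are pure values that the linker encodes *as* addresses; there is no storage behind them at all. Hence the `.get() as usize` reads with no dereference anywhere. A sketch of the size-symbol idiom in isolation (the linker-script definition in the comment is an assumption, not part of this diff):

```rust
use core::cell::UnsafeCell;

extern "Rust" {
    // Assumed to be defined in the linker script along the lines of
    // `__ro_size = __ro_end - __ro_start;`, meaning the symbol's
    // *address* is the value.
    static __ro_size: UnsafeCell<()>;
}

fn ro_size() -> usize {
    // SAFETY: value is provided by the linker script and trusted as-is;
    // the "pointer" is never dereferenced.
    unsafe { __ro_size.get() as usize }
}
```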

@ -6,27 +6,32 @@
pub mod mmu;
use core::ops::Range;
use core::ops::RangeInclusive;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Zero out a memory range.
/// Zero out an inclusive memory range.
///
/// # Safety
///
/// - `range.start` and `range.end` must be valid.
/// - `range.start` and `range.end` must be `T` aligned.
pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
where
T: From<u8>,
{
let mut ptr = range.start;
let mut ptr = *range.start();
let end_inclusive = *range.end();
while ptr < range.end {
loop {
core::ptr::write_volatile(ptr, T::from(0));
ptr = ptr.offset(1);
if ptr > end_inclusive {
break;
}
}
}
@ -44,9 +49,25 @@ mod tests {
fn zero_volatile_works() {
let mut x: [usize; 3] = [10, 11, 12];
let x_range = x.as_mut_ptr_range();
let x_range_inclusive =
RangeInclusive::new(x_range.start, unsafe { x_range.end.offset(-1) });
unsafe { zero_volatile(x_range) };
unsafe { zero_volatile(x_range_inclusive) };
assert_eq!(x, [0, 0, 0]);
}
/// Check `bss` section layout.
#[kernel_test]
fn bss_section_is_sane() {
use crate::bsp::memory::bss_range_inclusive;
use core::mem;
let start = *bss_range_inclusive().start() as usize;
let end = *bss_range_inclusive().end() as usize;
assert_eq!(start % mem::size_of::<usize>(), 0);
assert_eq!(end % mem::size_of::<usize>(), 0);
assert!(end >= start);
}
}

@ -17,7 +17,7 @@ use crate::{bsp, memory};
/// - Must only be called pre `kernel_init()`.
#[inline(always)]
unsafe fn zero_bss() {
memory::zero_volatile(bsp::memory::bss_range());
memory::zero_volatile(bsp::memory::bss_range_inclusive());
}
//--------------------------------------------------------------------------------------------------
@ -38,26 +38,3 @@ pub unsafe fn runtime_init() -> ! {
zero_bss();
kernel_init()
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Check `bss` section layout.
#[kernel_test]
fn bss_section_is_sane() {
use core::mem;
let start = bsp::memory::bss_range().start as *const _ as usize;
let end = bsp::memory::bss_range().end as *const _ as usize;
assert_eq!(start % mem::size_of::<usize>(), 0);
assert_eq!(end % mem::size_of::<usize>(), 0);
assert!(end >= start);
}
}


@ -29,7 +29,7 @@ SECTIONS
__bss_start = .;
*(.bss*);
. = ALIGN(8);
__bss_end = .;
__bss_end_inclusive = . - 8;
}
/DISCARD/ : { *(.comment*) }

@ -4,16 +4,16 @@
//! BSP Memory Management.
use core::ops::Range;
use core::{cell::UnsafeCell, ops::RangeInclusive};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbols from the linker script.
extern "C" {
static __bss_start: usize;
static __bss_end: usize;
extern "Rust" {
static __bss_start: UnsafeCell<u64>;
static __bss_end_inclusive: UnsafeCell<u64>;
}
//--------------------------------------------------------------------------------------------------
@ -59,17 +59,12 @@ pub fn boot_core_stack_end() -> usize {
map::BOOT_CORE_STACK_END
}
/// Return the range spanning the .bss section.
/// Return the inclusive range spanning the .bss section.
///
/// # Safety
///
/// - Values are provided by the linker script and must be trusted as-is.
/// - The linker-provided addresses must be u64 aligned.
pub fn bss_range() -> Range<*mut u64> {
unsafe {
Range {
start: &__bss_start as *const _ as *mut u64,
end: &__bss_end as *const _ as *mut u64,
}
}
pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) }
}

@ -4,26 +4,31 @@
//! Memory Management.
use core::ops::Range;
use core::ops::RangeInclusive;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Zero out a memory range.
/// Zero out an inclusive memory range.
///
/// # Safety
///
/// - `range.start` and `range.end` must be valid.
/// - `range.start` and `range.end` must be `T` aligned.
pub unsafe fn zero_volatile<T>(range: Range<*mut T>)
pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
where
T: From<u8>,
{
let mut ptr = range.start;
let mut ptr = *range.start();
let end_inclusive = *range.end();
while ptr < range.end {
loop {
core::ptr::write_volatile(ptr, T::from(0));
ptr = ptr.offset(1);
if ptr > end_inclusive {
break;
}
}
}

@ -17,7 +17,7 @@ use crate::{bsp, memory};
/// - Must only be called pre `kernel_init()`.
#[inline(always)]
unsafe fn zero_bss() {
memory::zero_volatile(bsp::memory::bss_range());
memory::zero_volatile(bsp::memory::bss_range_inclusive());
}
//--------------------------------------------------------------------------------------------------
