Some rework on virtual memory code

- Mostly more separation of concerns in tutorial 15.
- Cleanups in other parts.
Andre Richter 3 years ago
parent eb2bee6bb1
commit d09374710d

@@ -115,12 +115,12 @@ descriptors).
In `translation_table.rs`, there is a definition of the actual translation table struct which is
generic over the number of `LVL2` tables. The latter depends on the size of the target board's
memory. Naturally, the `BSP` knows these details about the target board, and provides the size
through the constant `bsp::memory::mmu::KernelAddrSpaceSize::SIZE`.
through the constant `bsp::memory::mmu::KernelAddrSpace::SIZE`.
This information is used by `translation_table.rs` to calculate the number of needed `LVL2` tables.
Since one `LVL2` table in a `64 KiB` configuration covers `512 MiB`, all that needs to be done is to
divide `KernelAddrSpaceSize::SIZE` by `512 MiB` (there are several compile-time checks in place that
ensure that `KernelAddrSpaceSize` is a multiple of `512 MiB`).
divide `KernelAddrSpace::SIZE` by `512 MiB` (there are several compile-time checks in place that
ensure that `KernelAddrSpace::SIZE` is a multiple of `512 MiB`).
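For a quick feel for the numbers, here is a standalone sketch of the same compile-time arithmetic (not tutorial code; the 1 GiB size is an assumed example value, the real one comes from the BSP's memory map):

```rust
// Standalone sketch: deriving the LVL2 table count at compile time.
// The 1 GiB address space size is an assumption for illustration.
const GRANULE_512_MIB_SHIFT: usize = 29; // log2(512 MiB)
const KERNEL_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB (assumed)
const NUM_LVL2_TABLES: usize = KERNEL_ADDR_SPACE_SIZE >> GRANULE_512_MIB_SHIFT;

fn main() {
    // 1 GiB / 512 MiB = 2 LVL2 tables.
    assert_eq!(NUM_LVL2_TABLES, 2);
}
```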
The final table type is exported as `KernelTranslationTable`. Below is the respective excerpt from
`translation_table.rs`:
@@ -144,7 +144,7 @@ struct PageDescriptor {
value: u64,
}
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
@@ -175,10 +175,6 @@ tables:
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// # Safety
///
/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
static mut KERNEL_TABLES: KernelTranslationTable = KernelTranslationTable::new();
```
@@ -213,10 +209,10 @@ Afterwards, the [Translation Table Base Register 0 - EL1] is set up with the bas
`lvl2` tables and the [Translation Control Register - EL1] is configured:
```rust
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address());
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
self.configure_translation_control();
self.configure_translation_control();
```
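The `T0SZ` value that `configure_translation_control()` programs into `TCR_EL1` falls out of the same size constant. A standalone sketch of that arithmetic, again assuming a 1 GiB address space (`KernelAddrSpace::SIZE_SHIFT` supplies the real log2 value):

```rust
// Sketch of the T0SZ arithmetic: TTBR0_EL1 translates virtual addresses
// in the range [0, 2^(64 - T0SZ)). The 1 GiB size is assumed.
const ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB (assumed)
const SIZE_SHIFT: usize = ADDR_SPACE_SIZE.trailing_zeros() as usize; // 30

fn main() {
    let t0sz = (64 - SIZE_SHIFT) as u64;
    assert_eq!(t0sz, 34); // 2^(64 - 34) = 2^30 bytes = 1 GiB
}
```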
Finally, the `MMU` is turned on through the [System Control Register - EL1]. The last step also
@@ -297,7 +293,7 @@ unsafe fn kernel_init() -> ! {
use driver::interface::DriverManager;
use memory::mmu::interface::MMU;
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
}
```
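Note that the renamed function returns the new `MMUEnableError` type instead of a bare `&'static str`. A reduced, host-runnable sketch of how the panic message is formed from it (the enum and `Display` impl mirror the ones added in `src/memory/mmu.rs`):

```rust
// Reduced sketch of the new error type and its Display impl.
use core::fmt;

#[derive(Debug)]
enum MMUEnableError {
    AlreadyEnabled,
    Other(&'static str),
}

impl fmt::Display for MMUEnableError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
            MMUEnableError::Other(x) => write!(f, "{}", x),
        }
    }
}

fn main() {
    // What `panic!("MMU: {}", string)` would print for each variant.
    println!("MMU: {}", MMUEnableError::AlreadyEnabled);
    println!("MMU: {}", MMUEnableError::Other("Translation granule not supported in HW"));
}
```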
@@ -439,8 +435,8 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+ AttrIndx OFFSET(2) NUMBITS(3) [],
+
+ TYPE OFFSET(1) NUMBITS(1) [
+ Block = 0,
+ Table = 1
+ Reserved_Invalid = 0,
+ Page = 1
+ ],
+
+ VALID OFFSET(0) NUMBITS(1) [
@@ -468,19 +464,19 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+ value: u64,
+}
+
+trait BaseAddr {
+ fn base_addr_u64(&self) -> u64;
+ fn base_addr_usize(&self) -> usize;
+trait StartAddr {
+ fn phys_start_addr_u64(&self) -> u64;
+ fn phys_start_addr_usize(&self) -> usize;
+}
+
+const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
+const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
+
+//--------------------------------------------------------------------------------------------------
+// Public Definitions
+//--------------------------------------------------------------------------------------------------
+
+/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
+/// aligned, hence the "reverse" order of appearance.
+/// aligned, so the lvl3 is put first.
+#[repr(C)]
+#[repr(align(65536))]
+pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
@@ -498,12 +494,13 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+// Private Code
+//--------------------------------------------------------------------------------------------------
+
+impl<T, const N: usize> BaseAddr for [T; N] {
+ fn base_addr_u64(&self) -> u64 {
+// The binary is still identity mapped, so we don't need to convert here.
+impl<T, const N: usize> StartAddr for [T; N] {
+ fn phys_start_addr_u64(&self) -> u64 {
+ self as *const T as u64
+ }
+
+ fn base_addr_usize(&self) -> usize {
+ fn phys_start_addr_usize(&self) -> usize {
+ self as *const _ as usize
+ }
+}
@@ -517,14 +514,14 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+ }
+
+ /// Create an instance pointing to the supplied address.
+ pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self {
+ pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
+ let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
+
+ let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT;
+ let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT;
+ val.write(
+ STAGE1_TABLE_DESCRIPTOR::VALID::True
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ + STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
+ + STAGE1_TABLE_DESCRIPTOR::VALID::True,
+ );
+
+ TableDescriptor { value: val.get() }
@@ -577,16 +574,16 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+ }
+
+ /// Create an instance.
+ pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self {
+ pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
+ let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
+
+ let shifted = output_addr as u64 >> Granule64KiB::SHIFT;
+ let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
+ val.write(
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ + STAGE1_PAGE_DESCRIPTOR::AF::True
+ + attribute_fields.into()
+ + STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted),
+ + STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ + STAGE1_PAGE_DESCRIPTOR::VALID::True
+ + attribute_fields.clone().into(),
+ );
+
+ Self { value: val.get() }
@@ -599,10 +596,9 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+
+impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
+ /// Create an instance.
+ #[allow(clippy::assertions_on_constants)]
+ pub const fn new() -> Self {
+ // Can't have a zero-sized address space.
+ assert!(NUM_TABLES > 0);
+ assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0);
+
+ Self {
+ lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
@@ -618,15 +614,15 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+ pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
+ for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
+ *l2_entry =
+ TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
+ TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
+
+ for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
+ let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
+
+ let (output_addr, attribute_fields) =
+ let (phys_output_addr, attribute_fields) =
+ bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
+
+ *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields);
+ *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
+ }
+ }
+
@@ -634,15 +630,15 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1
+ }
+
+ /// The translation table's base address to be used for programming the MMU.
+ pub fn base_address(&self) -> u64 {
+ self.lvl2.base_addr_u64()
+ pub fn phys_base_address(&self) -> u64 {
+ self.lvl2.phys_start_addr_u64()
+ }
+}
diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs
--- 10_privilege_level/src/_arch/aarch64/memory/mmu.rs
+++ 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs
@@ -0,0 +1,146 @@
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
@@ -662,6 +658,7 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part
+ bsp, memory,
+ memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule},
+};
+use core::intrinsics::unlikely;
+use cortex_a::{barrier, regs::*};
+
+//--------------------------------------------------------------------------------------------------
@@ -678,15 +675,6 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part
+pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
+pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
+
+/// The min supported address space size.
+pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB
+
+/// The max supported address space size.
+pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB
+
+/// The supported address space size granule.
+pub type AddrSpaceSizeGranule = Granule512MiB;
+
+/// Constants for indexing the MAIR_EL1.
+#[allow(dead_code)]
+pub mod mair {
@@ -711,6 +699,18 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part
+// Private Code
+//--------------------------------------------------------------------------------------------------
+
+impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
+ /// Checks for architectural restrictions.
+ pub const fn arch_address_space_size_sanity_checks() {
+ // Size must be at least one full 512 MiB table.
+ assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
+
+ // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
+ // version.
+ assert!(AS_SIZE <= (1 << 48));
+ }
+}
+
+impl MemoryManagementUnit {
+ /// Setup function for the MAIR_EL1 register.
+ fn set_up_mair(&self) {
@@ -727,19 +727,19 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part
+
+ /// Configure various settings of stage 1 of the EL1 translation regime.
+ fn configure_translation_control(&self) {
+ let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
+ let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64;
+ let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
+
+ TCR_EL1.write(
+ TCR_EL1::TBI0::Ignored
+ + TCR_EL1::IPS.val(ips)
+ + TCR_EL1::EPD1::DisableTTBR1Walks
+ TCR_EL1::TBI0::Used
+ + TCR_EL1::IPS::Bits_40
+ + TCR_EL1::TG0::KiB_64
+ + TCR_EL1::SH0::Inner
+ + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ + TCR_EL1::EPD0::EnableTTBR0Walks
+ + TCR_EL1::T0SZ.val(t0sz),
+ + TCR_EL1::A1::TTBR0
+ + TCR_EL1::T0SZ.val(t0sz)
+ + TCR_EL1::EPD1::DisableTTBR1Walks,
+ );
+ }
+}
@@ -756,22 +756,31 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part
+//------------------------------------------------------------------------------
+// OS Interface Code
+//------------------------------------------------------------------------------
+use memory::mmu::MMUEnableError;
+
+impl memory::mmu::interface::MMU for MemoryManagementUnit {
+ unsafe fn init(&self) -> Result<(), &'static str> {
+ // Fail early if translation granule is not supported. Both RPis support it, though.
+ if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
+ return Err("Translation granule not supported in HW");
+ unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
+ if unlikely(self.is_enabled()) {
+ return Err(MMUEnableError::AlreadyEnabled);
+ }
+
+ // Fail early if translation granule is not supported.
+ if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
+ return Err(MMUEnableError::Other(
+ "Translation granule not supported in HW",
+ ));
+ }
+
+ // Prepare the memory attribute indirection register.
+ self.set_up_mair();
+
+ // Populate translation tables.
+ KERNEL_TABLES.populate_tt_entries()?;
+ KERNEL_TABLES
+ .populate_tt_entries()
+ .map_err(|e| MMUEnableError::Other(e))?;
+
+ // Set the "Translation Table Base Register".
+ TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address());
+ TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
+
+ self.configure_translation_control();
+
@@ -788,6 +797,11 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part
+
+ Ok(())
+ }
+
+ #[inline(always)]
+ fn is_enabled(&self) -> bool {
+ SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
+ }
+}
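The new `is_enabled()` boils down to testing the `M` bit (bit 0) of `SCTLR_EL1`. A mocked sketch of the semantics, since reading the real system register requires privileged AArch64 code (the actual implementation goes through the `cortex-a` crate):

```rust
// Mocked sketch: SCTLR_EL1.M (bit 0) reports whether the MMU is on.
fn is_enabled(sctlr_el1: u64) -> bool {
    (sctlr_el1 & 0b1) != 0
}

fn main() {
    assert!(!is_enabled(0x0030_0000)); // M clear: MMU off
    assert!(is_enabled(0x0030_0001)); // M set: MMU on
}
```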
diff -uNr 10_privilege_level/src/bsp/raspberrypi/link.ld 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/link.ld
@@ -829,8 +843,8 @@ diff -uNr 10_privilege_level/src/bsp/raspberrypi/memory/mmu.rs 11_virtual_mem_pa
+// Public Definitions
+//--------------------------------------------------------------------------------------------------
+
+/// The address space size chosen by this BSP.
+pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>;
+/// The kernel's address space defined by this BSP.
+pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
+
+const NUM_MEM_RANGES: usize = 3;
+
@@ -1006,7 +1020,7 @@ diff -uNr 10_privilege_level/src/bsp.rs 11_virtual_mem_part1_identity_mapping/sr
diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/src/main.rs
--- 10_privilege_level/src/main.rs
+++ 11_virtual_mem_part1_identity_mapping/src/main.rs
@@ -108,7 +108,10 @@
@@ -108,7 +108,11 @@
//! [`runtime_init::runtime_init()`]: runtime_init/fn.runtime_init.html
#![allow(clippy::clippy::upper_case_acronyms)]
@@ -1014,16 +1028,17 @@ diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/s
#![feature(const_fn_fn_ptr_basics)]
+#![feature(const_generics)]
+#![feature(const_panic)]
+#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(panic_info_message)]
#![feature(trait_alias)]
@@ -132,9 +135,18 @@
@@ -132,9 +136,18 @@
/// # Safety
///
/// - Only a single core must be active and running this function.
-/// - The init calls in this function must appear in the correct order.
+/// - The init calls in this function must appear in the correct order:
+/// - Virtual memory must be activated before the device drivers.
+/// - Caching must be activated before the device drivers.
+/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
+/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
+/// the RPi SoCs.
@@ -1031,13 +1046,13 @@ diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/s
use driver::interface::DriverManager;
+ use memory::mmu::interface::MMU;
+
+ if let Err(string) = memory::mmu::mmu().init() {
+ if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
+ panic!("MMU: {}", string);
+ }
for i in bsp::driver::driver_manager().all_device_drivers().iter() {
if let Err(x) = i.init() {
@@ -158,6 +170,9 @@
@@ -158,6 +171,9 @@
info!("Booting on: {}", bsp::board_name());
@@ -1047,7 +1062,7 @@ diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/s
let (_, privilege_level) = exception::current_privilege_level();
info!("Current privilege level: {}", privilege_level);
@@ -181,6 +196,13 @@
@@ -181,6 +197,13 @@
info!("Timer test, spinning for 1 second");
time::time_manager().spin_for(Duration::from_secs(1));
@@ -1084,7 +1099,7 @@ diff -uNr 10_privilege_level/src/memory/mmu/translation_table.rs 11_virtual_mem_
diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs
--- 10_privilege_level/src/memory/mmu.rs
+++ 11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs
@@ -0,0 +1,247 @@
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
@@ -1118,8 +1133,17 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map
+// Public Definitions
+//--------------------------------------------------------------------------------------------------
+
+/// MMU enable errors variants.
+#[allow(missing_docs)]
+#[derive(Debug)]
+pub enum MMUEnableError {
+ AlreadyEnabled,
+ Other(&'static str),
+}
+
+/// Memory Management interfaces.
+pub mod interface {
+ use super::*;
+
+ /// MMU functions.
+ pub trait MMU {
@@ -1129,15 +1153,18 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map
+ /// # Safety
+ ///
+ /// - Changes the HW's global state.
+ unsafe fn init(&self) -> Result<(), &'static str>;
+ unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;
+
+ /// Returns true if the MMU is enabled, false otherwise.
+ fn is_enabled(&self) -> bool;
+ }
+}
+
+/// Describes the characteristics of a translation granule.
+pub struct TranslationGranule<const GRANULE_SIZE: usize>;
+
+/// Describes the size of an address space.
+pub struct AddressSpaceSize<const AS_SIZE: usize>;
+/// Describes properties of an address space.
+pub struct AddressSpace<const AS_SIZE: usize>;
+
+/// Architecture agnostic translation types.
+#[allow(missing_docs)]
@@ -1195,6 +1222,15 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map
+// Public Code
+//--------------------------------------------------------------------------------------------------
+
+impl fmt::Display for MMUEnableError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
+ MMUEnableError::Other(x) => write!(f, "{}", x),
+ }
+ }
+}
+
+impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
+ /// The granule's size.
+ pub const SIZE: usize = Self::size_checked();
@@ -1209,22 +1245,18 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map
+ }
+}
+
+impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
+impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
+ /// The address space size.
+ pub const SIZE: usize = Self::size_checked();
+
+ /// The address space shift, aka log2(size).
+ pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
+ pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
+
+ const fn size_checked() -> usize {
+ assert!(AS_SIZE.is_power_of_two());
+ assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
+ assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
+
+ // Must adhere to architectural restrictions.
+ assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
+ assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
+ assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
+ // Check for architectural restrictions as well.
+ Self::arch_address_space_size_sanity_checks();
+
+ AS_SIZE
+ }
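Since `size_checked()` is a `const fn` backing an associated constant, a BSP that picks an illegal size is rejected at build time; that is what the new `const_panic` feature gate buys. A reduced sketch with the checks inlined (the real code splits them between the generic and the `aarch64` part):

```rust
// Reduced sketch of the compile-time address space validation.
pub struct AddressSpace<const AS_SIZE: usize>;

impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
    pub const SIZE: usize = Self::size_checked();

    const fn size_checked() -> usize {
        assert!(AS_SIZE.is_power_of_two());
        assert!(AS_SIZE % (512 * 1024 * 1024) == 0); // one full LVL2 table
        AS_SIZE
    }
}

fn main() {
    // Compiles: 1 GiB is a power of two and a multiple of 512 MiB.
    println!("{}", AddressSpace::<{ 1 << 30 }>::SIZE);
    // Something like AddressSpace::<{ 1 << 20 }>::SIZE would abort the
    // build as soon as the constant is evaluated.
}
```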

@@ -17,6 +17,7 @@ use crate::{
bsp, memory,
memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule},
};
use core::intrinsics::unlikely;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
@@ -33,15 +34,6 @@ struct MemoryManagementUnit;
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
/// The min supported address space size.
pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB
/// The max supported address space size.
pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB
/// The supported address space size granule.
pub type AddrSpaceSizeGranule = Granule512MiB;
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
@@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
assert!(AS_SIZE <= (1 << 48));
}
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
fn set_up_mair(&self) {
@@ -82,19 +86,19 @@ impl MemoryManagementUnit {
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64;
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::EPD1::DisableTTBR1Walks
TCR_EL1::TBI0::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(t0sz),
+ TCR_EL1::A1::TTBR0
+ TCR_EL1::T0SZ.val(t0sz)
+ TCR_EL1::EPD1::DisableTTBR1Walks,
);
}
}
@@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::MMUEnableError;
impl memory::mmu::interface::MMU for MemoryManagementUnit {
unsafe fn init(&self) -> Result<(), &'static str> {
// Fail early if translation granule is not supported. Both RPis support it, though.
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
return Err("Translation granule not supported in HW");
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Populate translation tables.
KERNEL_TABLES.populate_tt_entries()?;
KERNEL_TABLES
.populate_tt_entries()
.map_err(|e| MMUEnableError::Other(e))?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address());
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
self.configure_translation_control();
@@ -143,4 +156,9 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
Ok(())
}
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}
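The early-out checks in `enable_mmu_and_caching()` are wrapped in `core::intrinsics::unlikely`, which is why `#![feature(core_intrinsics)]` joins the crate attributes. A nightly-only sketch of the pattern; the intrinsic is purely a branch-prediction hint and does not change behavior:

```rust
#![feature(core_intrinsics)]

use core::intrinsics::unlikely;

// Sketch of the early-out pattern with a cold-branch hint.
fn enable(already_enabled: bool) -> Result<(), &'static str> {
    if unlikely(already_enabled) {
        return Err("MMU is already enabled");
    }
    Ok(())
}

fn main() {
    assert_eq!(enable(true), Err("MMU is already enabled"));
    assert!(enable(false).is_ok());
}
```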

@@ -87,8 +87,8 @@ register_bitfields! {u64,
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
Reserved_Invalid = 0,
Page = 1
],
VALID OFFSET(0) NUMBITS(1) [
@@ -116,19 +116,19 @@ struct PageDescriptor {
value: u64,
}
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
trait StartAddr {
fn phys_start_addr_u64(&self) -> u64;
fn phys_start_addr_usize(&self) -> usize;
}
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, hence the "reverse" order of appearance.
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
@@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> BaseAddr for [T; N] {
fn base_addr_u64(&self) -> u64 {
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> StartAddr for [T; N] {
fn phys_start_addr_u64(&self) -> u64 {
self as *const T as u64
}
fn base_addr_usize(&self) -> usize {
fn phys_start_addr_usize(&self) -> usize {
self as *const _ as usize
}
}
@@ -165,14 +166,14 @@ impl TableDescriptor {
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::VALID::True
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
+ STAGE1_TABLE_DESCRIPTOR::VALID::True,
);
TableDescriptor { value: val.get() }
@@ -225,16 +226,16 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self {
pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = output_addr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::VALID::True
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ attribute_fields.into()
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted),
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ attribute_fields.clone().into(),
);
Self { value: val.get() }
@@ -247,10 +248,9 @@ impl PageDescriptor {
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
@@ -266,15 +266,15 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
*l2_entry =
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
let (output_addr, attribute_fields) =
let (phys_output_addr, attribute_fields) =
bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
*l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields);
*l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
}
}
@@ -282,7 +282,7 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
}
/// The translation table's base address to be used for programming the MMU.
pub fn base_address(&self) -> u64 {
self.lvl2.base_addr_u64()
pub fn phys_base_address(&self) -> u64 {
self.lvl2.phys_start_addr_u64()
}
}
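The `TYPE` field rename (`Block`/`Table` to `Reserved_Invalid`/`Page`) matches how ARMv8 interprets descriptor bits[1:0] at the last table level: `0b11` encodes a page, while `0b01`, which would mean "block" at levels 0 through 2, is reserved there. A small decoding sketch of that rule:

```rust
// Sketch of the ARMv8 LVL3 descriptor encoding behind the TYPE rename.
fn decode_lvl3(descriptor: u64) -> &'static str {
    match descriptor & 0b11 {
        0b11 => "page",
        0b01 => "reserved/invalid",
        _ => "invalid (VALID bit clear)",
    }
}

fn main() {
    assert_eq!(decode_lvl3(0b11), "page");
    assert_eq!(decode_lvl3(0b01), "reserved/invalid");
    assert_eq!(decode_lvl3(0b00), "invalid (VALID bit clear)");
}
```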

@@ -12,8 +12,8 @@ use core::ops::RangeInclusive;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The address space size chosen by this BSP.
pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>;
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
const NUM_MEM_RANGES: usize = 3;

@@ -112,6 +112,7 @@
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_generics)]
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(panic_info_message)]
#![feature(trait_alias)]
@@ -136,7 +137,7 @@ mod time;
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Caching must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
/// the RPi SoCs.
@@ -144,7 +145,7 @@ unsafe fn kernel_init() -> ! {
use driver::interface::DriverManager;
use memory::mmu::interface::MMU;
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
}

@@ -31,8 +31,17 @@ pub use arch_mmu::mmu;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
AlreadyEnabled,
Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
use super::*;
/// MMU functions.
pub trait MMU {
@@ -42,15 +51,18 @@ pub mod interface {
/// # Safety
///
/// - Changes the HW's global state.
unsafe fn init(&self) -> Result<(), &'static str>;
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
}
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes the size of an address space.
pub struct AddressSpaceSize<const AS_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Architecture agnostic translation types.
#[allow(missing_docs)]
@@ -108,6 +120,15 @@ pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
@@ -122,22 +143,18 @@ impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
}
}
impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// The address space size.
pub const SIZE: usize = Self::size_checked();
/// The address space shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(AS_SIZE.is_power_of_two());
assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
// Must adhere to architectural restrictions.
assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
// Check for architectural restrictions as well.
Self::arch_address_space_size_sanity_checks();
AS_SIZE
}

@@ -900,8 +900,8 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.r
--- 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.rs
+++ 12_exceptions_part1_groundwork/src/bsp/raspberrypi/memory/mmu.rs
@@ -15,7 +15,7 @@
/// The address space size chosen by this BSP.
pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>;
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
-const NUM_MEM_RANGES: usize = 3;
+const NUM_MEM_RANGES: usize = 2;
@@ -967,24 +967,24 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/exception.rs 12_exceptions_p
diff -uNr 11_virtual_mem_part1_identity_mapping/src/main.rs 12_exceptions_part1_groundwork/src/main.rs
--- 11_virtual_mem_part1_identity_mapping/src/main.rs
+++ 12_exceptions_part1_groundwork/src/main.rs
@@ -113,6 +113,7 @@
#![feature(const_generics)]
@@ -114,6 +114,7 @@
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
+#![feature(global_asm)]
#![feature(panic_info_message)]
#![feature(trait_alias)]
#![no_main]
@@ -144,6 +145,8 @@
@@ -145,6 +146,8 @@
use driver::interface::DriverManager;
use memory::mmu::interface::MMU;
+ exception::handling_init();
+
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
}
@@ -196,13 +199,28 @@
@@ -197,13 +200,28 @@
info!("Timer test, spinning for 1 second");
time::time_manager().spin_for(Duration::from_secs(1));

@@ -17,6 +17,7 @@ use crate::{
bsp, memory,
memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule},
};
use core::intrinsics::unlikely;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
@@ -33,15 +34,6 @@ struct MemoryManagementUnit;
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
/// The min supported address space size.
pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB
/// The max supported address space size.
pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB
/// The supported address space size granule.
pub type AddrSpaceSizeGranule = Granule512MiB;
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
@@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
assert!(AS_SIZE <= (1 << 48));
}
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
fn set_up_mair(&self) {
@@ -82,19 +86,19 @@ impl MemoryManagementUnit {
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64;
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::EPD1::DisableTTBR1Walks
TCR_EL1::TBI0::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(t0sz),
+ TCR_EL1::A1::TTBR0
+ TCR_EL1::T0SZ.val(t0sz)
+ TCR_EL1::EPD1::DisableTTBR1Walks,
);
}
}
@@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::MMUEnableError;
impl memory::mmu::interface::MMU for MemoryManagementUnit {
unsafe fn init(&self) -> Result<(), &'static str> {
// Fail early if translation granule is not supported. Both RPis support it, though.
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
return Err("Translation granule not supported in HW");
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Populate translation tables.
KERNEL_TABLES.populate_tt_entries()?;
KERNEL_TABLES
.populate_tt_entries()
.map_err(|e| MMUEnableError::Other(e))?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address());
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
self.configure_translation_control();
@@ -143,4 +156,9 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
Ok(())
}
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}

@@ -87,8 +87,8 @@ register_bitfields! {u64,
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
Reserved_Invalid = 0,
Page = 1
],
VALID OFFSET(0) NUMBITS(1) [
@@ -116,19 +116,19 @@ struct PageDescriptor {
value: u64,
}
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
trait StartAddr {
fn phys_start_addr_u64(&self) -> u64;
fn phys_start_addr_usize(&self) -> usize;
}
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, hence the "reverse" order of appearance.
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
@@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> BaseAddr for [T; N] {
fn base_addr_u64(&self) -> u64 {
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> StartAddr for [T; N] {
fn phys_start_addr_u64(&self) -> u64 {
self as *const T as u64
}
fn base_addr_usize(&self) -> usize {
fn phys_start_addr_usize(&self) -> usize {
self as *const _ as usize
}
}
@@ -165,14 +166,14 @@ impl TableDescriptor {
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::VALID::True
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
+ STAGE1_TABLE_DESCRIPTOR::VALID::True,
);
TableDescriptor { value: val.get() }
@@ -225,16 +226,16 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self {
pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = output_addr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::VALID::True
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ attribute_fields.into()
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted),
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ attribute_fields.clone().into(),
);
Self { value: val.get() }
@@ -247,10 +248,9 @@ impl PageDescriptor {
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
@@ -266,15 +266,15 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
*l2_entry =
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
let (output_addr, attribute_fields) =
let (phys_output_addr, attribute_fields) =
bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
*l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields);
*l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
}
}
@@ -282,7 +282,7 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
}
/// The translation table's base address to be used for programming the MMU.
pub fn base_address(&self) -> u64 {
self.lvl2.base_addr_u64()
pub fn phys_base_address(&self) -> u64 {
self.lvl2.phys_start_addr_u64()
}
}

@@ -12,8 +12,8 @@ use core::ops::RangeInclusive;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The address space size chosen by this BSP.
pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>;
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
const NUM_MEM_RANGES: usize = 2;

@@ -112,6 +112,7 @@
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_generics)]
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(global_asm)]
#![feature(panic_info_message)]
@@ -137,7 +138,7 @@ mod time;
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Caching must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
/// the RPi SoCs.
@@ -147,7 +148,7 @@ unsafe fn kernel_init() -> ! {
exception::handling_init();
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
}

@@ -31,8 +31,17 @@ pub use arch_mmu::mmu;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
AlreadyEnabled,
Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
use super::*;
/// MMU functions.
pub trait MMU {
@@ -42,15 +51,18 @@ pub mod interface {
/// # Safety
///
/// - Changes the HW's global state.
unsafe fn init(&self) -> Result<(), &'static str>;
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
}
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes the size of an address space.
pub struct AddressSpaceSize<const AS_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Architecture agnostic translation types.
#[allow(missing_docs)]
@@ -108,6 +120,15 @@ pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
@@ -122,22 +143,18 @@ impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
}
}
impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// The address space size.
pub const SIZE: usize = Self::size_checked();
/// The address space shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(AS_SIZE.is_power_of_two());
assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
// Must adhere to architectural restrictions.
assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
// Check for architectural restrictions as well.
Self::arch_address_space_size_sanity_checks();
AS_SIZE
}

@@ -1026,7 +1026,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translatio
--- 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translation_table.rs
+++ 13_integrated_testing/src/_arch/aarch64/memory/mmu/translation_table.rs
@@ -286,3 +286,31 @@
self.lvl2.base_addr_u64()
self.lvl2.phys_start_addr_u64()
}
}
+
@@ -1061,8 +1061,8 @@ diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translatio
diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs 13_integrated_testing/src/_arch/aarch64/memory/mmu.rs
--- 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs
+++ 13_integrated_testing/src/_arch/aarch64/memory/mmu.rs
@@ -144,3 +144,22 @@
Ok(())
@@ -162,3 +162,22 @@
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}
+
@@ -1194,7 +1194,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/exception.rs 13_integrated_testing/
diff -uNr 12_exceptions_part1_groundwork/src/lib.rs 13_integrated_testing/src/lib.rs
--- 12_exceptions_part1_groundwork/src/lib.rs
+++ 13_integrated_testing/src/lib.rs
@@ -0,0 +1,171 @@
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
@@ -1311,6 +1311,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/lib.rs 13_integrated_testing/src/li
+#![feature(const_fn_fn_ptr_basics)]
+#![feature(const_generics)]
+#![feature(const_panic)]
+#![feature(core_intrinsics)]
+#![feature(format_args_nl)]
+#![feature(global_asm)]
+#![feature(linkage)]
@@ -1370,7 +1371,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/lib.rs 13_integrated_testing/src/li
diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/main.rs
--- 12_exceptions_part1_groundwork/src/main.rs
+++ 13_integrated_testing/src/main.rs
@@ -6,130 +6,12 @@
@@ -6,131 +6,12 @@
#![doc(html_logo_url = "https://git.io/JeGIp")]
//! The `kernel` binary.
@@ -1480,6 +1481,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m
-#![feature(const_fn_fn_ptr_basics)]
-#![feature(const_generics)]
-#![feature(const_panic)]
-#![feature(core_intrinsics)]
+
#![feature(format_args_nl)]
-#![feature(global_asm)]
@@ -1503,7 +1505,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m
/// Early init code.
///
@@ -141,6 +23,7 @@
@@ -142,6 +23,7 @@
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
/// the RPi SoCs.
@@ -1511,7 +1513,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m
unsafe fn kernel_init() -> ! {
use driver::interface::DriverManager;
use memory::mmu::interface::MMU;
@@ -167,9 +50,7 @@
@@ -168,9 +50,7 @@
fn kernel_main() -> ! {
use bsp::console::console;
use console::interface::All;
@@ -1521,7 +1523,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m
info!("Booting on: {}", bsp::board_name());
@@ -196,31 +77,6 @@
@@ -197,31 +77,6 @@
info!(" {}. {}", i + 1, driver.compatible());
}
@ -1557,7 +1559,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m
diff -uNr 12_exceptions_part1_groundwork/src/memory/mmu.rs 13_integrated_testing/src/memory/mmu.rs
--- 12_exceptions_part1_groundwork/src/memory/mmu.rs
+++ 13_integrated_testing/src/memory/mmu.rs
@@ -54,7 +54,6 @@
@@ -66,7 +66,6 @@
/// Architecture agnostic translation types.
#[allow(missing_docs)]
@ -1565,7 +1567,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/memory/mmu.rs 13_integrated_testing
#[derive(Copy, Clone)]
pub enum Translation {
Identity,
@@ -244,4 +243,9 @@
@@ -261,4 +260,9 @@
info!("{}", i);
}
}
@@ -1910,7 +1912,7 @@ diff -uNr 12_exceptions_part1_groundwork/tests/02_exception_sync_page_fault.rs 1
+ println!("Testing synchronous exception handling by causing a page fault");
+ println!("-------------------------------------------------------------------\n");
+
+ if let Err(string) = memory::mmu::mmu().init() {
+ if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
+ println!("MMU: {}", string);
+ cpu::qemu_exit_failure()
+ }

@@ -17,6 +17,7 @@ use crate::{
bsp, memory,
memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule},
};
use core::intrinsics::unlikely;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
@@ -33,15 +34,6 @@ struct MemoryManagementUnit;
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
/// The min supported address space size.
pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB
/// The max supported address space size.
pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB
/// The supported address space size granule.
pub type AddrSpaceSizeGranule = Granule512MiB;
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
@@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
assert!(AS_SIZE <= (1 << 48));
}
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
fn set_up_mair(&self) {
@@ -82,19 +86,19 @@ impl MemoryManagementUnit {
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64;
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::EPD1::DisableTTBR1Walks
TCR_EL1::TBI0::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(t0sz),
+ TCR_EL1::A1::TTBR0
+ TCR_EL1::T0SZ.val(t0sz)
+ TCR_EL1::EPD1::DisableTTBR1Walks,
);
}
}
@@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::MMUEnableError;
impl memory::mmu::interface::MMU for MemoryManagementUnit {
unsafe fn init(&self) -> Result<(), &'static str> {
// Fail early if translation granule is not supported. Both RPis support it, though.
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
return Err("Translation granule not supported in HW");
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Populate translation tables.
KERNEL_TABLES.populate_tt_entries()?;
KERNEL_TABLES
.populate_tt_entries()
.map_err(|e| MMUEnableError::Other(e))?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address());
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
self.configure_translation_control();
@@ -143,6 +156,11 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
Ok(())
}
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}
//--------------------------------------------------------------------------------------------------

@@ -87,8 +87,8 @@ register_bitfields! {u64,
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
Reserved_Invalid = 0,
Page = 1
],
VALID OFFSET(0) NUMBITS(1) [
@@ -116,19 +116,19 @@ struct PageDescriptor {
value: u64,
}
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
trait StartAddr {
fn phys_start_addr_u64(&self) -> u64;
fn phys_start_addr_usize(&self) -> usize;
}
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, hence the "reverse" order of appearance.
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
@@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> BaseAddr for [T; N] {
fn base_addr_u64(&self) -> u64 {
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> StartAddr for [T; N] {
fn phys_start_addr_u64(&self) -> u64 {
self as *const T as u64
}
fn base_addr_usize(&self) -> usize {
fn phys_start_addr_usize(&self) -> usize {
self as *const _ as usize
}
}
@ -165,14 +166,14 @@ impl TableDescriptor {
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::VALID::True
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
+ STAGE1_TABLE_DESCRIPTOR::VALID::True,
);
TableDescriptor { value: val.get() }
@ -225,16 +226,16 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self {
pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = output_addr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::VALID::True
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ attribute_fields.into()
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted),
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ attribute_fields.clone().into(),
);
Self { value: val.get() }
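The shift in `from_output_addr` is simply the page frame number for a 64 KiB granule: `OUTPUT_ADDR_64KiB` stores the physical address with its low 16 bits stripped. A standalone sketch of the arithmetic (the constant mirrors `Granule64KiB::SHIFT`; illustration only, not kernel code):

```rust
// Sketch: deriving the OUTPUT_ADDR_64KiB field value from a physical address.
const GRANULE_64KIB_SHIFT: u64 = 16; // log2(64 KiB)

fn output_addr_field(phys_output_addr: u64) -> u64 {
    phys_output_addr >> GRANULE_64KIB_SHIFT
}

fn main() {
    // Physical address 0x0008_0000 is the 8th 64 KiB page frame.
    assert_eq!(output_addr_field(0x0008_0000), 0x8);
}
```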
@ -247,10 +248,9 @@ impl PageDescriptor {
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
@ -266,15 +266,15 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
*l2_entry =
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
let (output_addr, attribute_fields) =
let (phys_output_addr, attribute_fields) =
bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
*l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields);
*l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
}
}
@ -282,8 +282,8 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
}
/// The translation table's base address to be used for programming the MMU.
pub fn base_address(&self) -> u64 {
self.lvl2.base_addr_u64()
pub fn phys_base_address(&self) -> u64 {
self.lvl2.phys_start_addr_u64()
}
}

@ -12,8 +12,8 @@ use core::ops::RangeInclusive;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The address space size chosen by this BSP.
pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>;
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
const NUM_MEM_RANGES: usize = 2;

@ -114,6 +114,7 @@
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_generics)]
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(global_asm)]
#![feature(linkage)]

@ -19,7 +19,7 @@ use libkernel::{bsp, console, driver, exception, info, memory, time};
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Caching must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
/// the RPi SoCs.
@ -30,7 +30,7 @@ unsafe fn kernel_init() -> ! {
exception::handling_init();
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
}

@ -31,8 +31,17 @@ pub use arch_mmu::mmu;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
AlreadyEnabled,
Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
use super::*;
/// MMU functions.
pub trait MMU {
@ -42,15 +51,18 @@ pub mod interface {
/// # Safety
///
/// - Changes the HW's global state.
unsafe fn init(&self) -> Result<(), &'static str>;
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
}
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes the size of an address space.
pub struct AddressSpaceSize<const AS_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Architecture agnostic translation types.
#[allow(missing_docs)]
@ -107,6 +119,15 @@ pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
@ -121,22 +142,18 @@ impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
}
}
impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// The address space size.
pub const SIZE: usize = Self::size_checked();
/// The address space shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(AS_SIZE.is_power_of_two());
assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
// Must adhere to architectural restrictions.
assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
// Check for architectural restrictions as well.
Self::arch_address_space_size_sanity_checks();
AS_SIZE
}
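Because `size_checked()` is a `const fn` feeding the `SIZE` associated constant, a misconfigured BSP now fails the build instead of failing at boot. A minimal sketch of the pattern, using a hypothetical `DemoAddressSpace` stand-in (the kernel gates `assert!` in const contexts behind the `const_panic` feature on its nightly toolchain):

```rust
// Sketch: compile-time validation of a const generic parameter.
// DemoAddressSpace is a hypothetical stand-in, not the kernel type.
pub struct DemoAddressSpace<const AS_SIZE: usize>;

impl<const AS_SIZE: usize> DemoAddressSpace<AS_SIZE> {
    pub const SIZE: usize = Self::size_checked();
    pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;

    const fn size_checked() -> usize {
        // A failing assert here aborts compilation wherever SIZE is used.
        assert!(AS_SIZE.is_power_of_two());
        AS_SIZE
    }
}

fn main() {
    // Forces const evaluation; 8 GiB == 2^33.
    assert_eq!(DemoAddressSpace::<{ 8 * 1024 * 1024 * 1024 }>::SIZE_SHIFT, 33);
}
```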

@ -29,7 +29,7 @@ unsafe fn kernel_init() -> ! {
println!("Testing synchronous exception handling by causing a page fault");
println!("-------------------------------------------------------------------\n");
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
println!("MMU: {}", string);
cpu::qemu_exit_failure()
}

@ -2296,7 +2296,7 @@ diff -uNr 13_integrated_testing/src/exception/asynchronous.rs 14_exceptions_part
diff -uNr 13_integrated_testing/src/lib.rs 14_exceptions_part2_peripheral_IRQs/src/lib.rs
--- 13_integrated_testing/src/lib.rs
+++ 14_exceptions_part2_peripheral_IRQs/src/lib.rs
@@ -111,9 +111,11 @@
@@ -111,6 +111,7 @@
#![allow(clippy::clippy::upper_case_acronyms)]
#![allow(incomplete_features)]
@ -2304,11 +2304,7 @@ diff -uNr 13_integrated_testing/src/lib.rs 14_exceptions_part2_peripheral_IRQs/s
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_generics)]
#![feature(const_panic)]
+#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(global_asm)]
#![feature(linkage)]
@@ -137,6 +139,7 @@
@@ -138,6 +139,7 @@
pub mod exception;
pub mod memory;
pub mod print;
@ -2331,7 +2327,7 @@ diff -uNr 13_integrated_testing/src/main.rs 14_exceptions_part2_peripheral_IRQs/
///
@@ -21,8 +21,8 @@
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Caching must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
-/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
-/// the RPi SoCs.
@ -2590,7 +2586,7 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip
type Data = T;
fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
@@ -72,6 +110,32 @@
@@ -72,6 +110,50 @@
// mutable reference will ever only be given out once at a time.
let data = unsafe { &mut *self.data.get() };
@ -2614,14 +2610,32 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip
+
+ let data = unsafe { &mut *self.data.get() };
+
+ f(data)
+ }
f(data)
}
+
+ fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
+ let data = unsafe { &*self.data.get() };
+
f(data)
}
+ f(data)
+ }
+}
+
+//--------------------------------------------------------------------------------------------------
+// Testing
+//--------------------------------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use test_macros::kernel_test;
+
+ /// InitStateLock must be transparent.
+ #[kernel_test]
+ fn init_state_lock_is_transparent() {
+ use core::mem::size_of;
+
+ assert_eq!(size_of::<InitStateLock<u64>>(), size_of::<u64>());
+ }
}
diff -uNr 13_integrated_testing/tests/03_exception_irq_sanity.rs 14_exceptions_part2_peripheral_IRQs/tests/03_exception_irq_sanity.rs

@ -17,6 +17,7 @@ use crate::{
bsp, memory,
memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule},
};
use core::intrinsics::unlikely;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
@ -33,15 +34,6 @@ struct MemoryManagementUnit;
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
/// The min supported address space size.
pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB
/// The max supported address space size.
pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB
/// The supported address space size granule.
pub type AddrSpaceSizeGranule = Granule512MiB;
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
assert!(AS_SIZE <= (1 << 48));
}
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
fn set_up_mair(&self) {
@ -82,19 +86,19 @@ impl MemoryManagementUnit {
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64;
let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::EPD1::DisableTTBR1Walks
TCR_EL1::TBI0::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(t0sz),
+ TCR_EL1::A1::TTBR0
+ TCR_EL1::T0SZ.val(t0sz)
+ TCR_EL1::EPD1::DisableTTBR1Walks,
);
}
}
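The `t0sz` line above is worth a quick sanity check: `T0SZ` tells the MMU how many of the 64 address bits are *not* translated, so it is `64 - log2(address space size)`. A standalone sketch, assuming a 1 GiB kernel address space for illustration:

```rust
// Sketch of the T0SZ computation from configure_translation_control().
// AS_SIZE is an assumed example value (1 GiB), not read from the BSP.
const AS_SIZE: usize = 0x4000_0000;

fn main() {
    let size_shift = AS_SIZE.trailing_zeros() as u64; // log2(1 GiB) = 30
    let t0sz = 64 - size_shift;

    // TTBR0_EL1 then covers virtual addresses [0, 2^(64 - T0SZ) - 1].
    assert_eq!(t0sz, 34);
    assert_eq!(1u64 << (64 - t0sz), AS_SIZE as u64);
}
```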
@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::MMUEnableError;
impl memory::mmu::interface::MMU for MemoryManagementUnit {
unsafe fn init(&self) -> Result<(), &'static str> {
// Fail early if translation granule is not supported. Both RPis support it, though.
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
return Err("Translation granule not supported in HW");
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Populate translation tables.
KERNEL_TABLES.populate_tt_entries()?;
KERNEL_TABLES
.populate_tt_entries()
.map_err(|e| MMUEnableError::Other(e))?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address());
TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address());
self.configure_translation_control();
@ -143,6 +156,11 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
Ok(())
}
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}
//--------------------------------------------------------------------------------------------------

@ -87,8 +87,8 @@ register_bitfields! {u64,
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
Reserved_Invalid = 0,
Page = 1
],
VALID OFFSET(0) NUMBITS(1) [
@ -116,19 +116,19 @@ struct PageDescriptor {
value: u64,
}
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
trait StartAddr {
fn phys_start_addr_u64(&self) -> u64;
fn phys_start_addr_usize(&self) -> usize;
}
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, hence the "reverse" order of appearance.
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> BaseAddr for [T; N] {
fn base_addr_u64(&self) -> u64 {
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> StartAddr for [T; N] {
fn phys_start_addr_u64(&self) -> u64 {
self as *const T as u64
}
fn base_addr_usize(&self) -> usize {
fn phys_start_addr_usize(&self) -> usize {
self as *const _ as usize
}
}
@ -165,14 +166,14 @@ impl TableDescriptor {
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::VALID::True
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
+ STAGE1_TABLE_DESCRIPTOR::VALID::True,
);
TableDescriptor { value: val.get() }
@ -225,16 +226,16 @@ impl PageDescriptor {
}
/// Create an instance.
pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self {
pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = output_addr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::VALID::True
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ attribute_fields.into()
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted),
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ attribute_fields.clone().into(),
);
Self { value: val.get() }
@ -247,10 +248,9 @@ impl PageDescriptor {
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
@ -266,15 +266,15 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> {
for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() {
*l2_entry =
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize());
TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize());
for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT);
let (output_addr, attribute_fields) =
let (phys_output_addr, attribute_fields) =
bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?;
*l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields);
*l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields);
}
}
@ -282,8 +282,8 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
}
/// The translation table's base address to be used for programming the MMU.
pub fn base_address(&self) -> u64 {
self.lvl2.base_addr_u64()
pub fn phys_base_address(&self) -> u64 {
self.lvl2.phys_start_addr_u64()
}
}

@ -12,8 +12,8 @@ use core::ops::RangeInclusive;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The address space size chosen by this BSP.
pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>;
/// The kernel's address space defined by this BSP.
pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>;
const NUM_MEM_RANGES: usize = 2;

@ -19,7 +19,7 @@ use libkernel::{bsp, cpu, driver, exception, info, memory, state, time, warn};
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Caching must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ IRQSafeNullLocks instead of spinlocks), will fail to
/// work on the RPi SoCs.
@ -30,7 +30,7 @@ unsafe fn kernel_init() -> ! {
exception::handling_init();
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
panic!("MMU: {}", string);
}

@ -31,8 +31,17 @@ pub use arch_mmu::mmu;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
AlreadyEnabled,
Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
use super::*;
/// MMU functions.
pub trait MMU {
@ -42,15 +51,18 @@ pub mod interface {
/// # Safety
///
/// - Changes the HW's global state.
unsafe fn init(&self) -> Result<(), &'static str>;
unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
}
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes the size of an address space.
pub struct AddressSpaceSize<const AS_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Architecture agnostic translation types.
#[allow(missing_docs)]
@ -107,6 +119,15 @@ pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
@ -121,22 +142,18 @@ impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
}
}
impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// The address space size.
pub const SIZE: usize = Self::size_checked();
/// The address space shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(AS_SIZE.is_power_of_two());
assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
// Must adhere to architectural restrictions.
assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
// Check for architectural restrictions as well.
Self::arch_address_space_size_sanity_checks();
AS_SIZE
}

@ -139,3 +139,21 @@ impl<T> interface::ReadWriteEx for InitStateLock<T> {
f(data)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// InitStateLock must be transparent.
#[kernel_test]
fn init_state_lock_is_transparent() {
use core::mem::size_of;
assert_eq!(size_of::<InitStateLock<u64>>(), size_of::<u64>());
}
}
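The transparency asserted by this test is purely about size: the lock must add no bytes, so wrapping a large structure like the kernel tables in it changes neither its layout nor its `.bss` placement. A sketch of why the property holds, assuming the lock keeps its data in an `UnsafeCell` (`DemoLock` is a hypothetical stand-in):

```rust
use core::cell::UnsafeCell;
use core::mem::size_of;

// Hypothetical stand-in for the lock's data layout.
#[allow(dead_code)]
struct DemoLock<T> {
    data: UnsafeCell<T>,
}

fn main() {
    // UnsafeCell<T> is guaranteed to have the same size as T, and a struct
    // with that single field adds nothing on top.
    assert_eq!(size_of::<DemoLock<u64>>(), size_of::<u64>());
}
```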

@ -29,7 +29,7 @@ unsafe fn kernel_init() -> ! {
println!("Testing synchronous exception handling by causing a page fault");
println!("-------------------------------------------------------------------\n");
if let Err(string) = memory::mmu::mmu().init() {
if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() {
println!("MMU: {}", string);
cpu::qemu_exit_failure()
}

File diff suppressed because it is too large

@ -15,12 +15,9 @@
use crate::{
bsp, memory,
memory::{
mmu::{translation_table::KernelTranslationTable, TranslationGranule},
Address, Physical,
},
synchronization::InitStateLock,
memory::{mmu::TranslationGranule, Address, Physical},
};
use core::intrinsics::unlikely;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
@ -37,15 +34,6 @@ struct MemoryManagementUnit;
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;
/// The min supported address space size.
pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB
/// The max supported address space size.
pub const MAX_ADDR_SPACE_SIZE: usize = 8 * 1024 * 1024 * 1024; // 8 GiB
/// The supported address space size granule.
pub type AddrSpaceSizeGranule = Granule512MiB;
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
@ -57,20 +45,24 @@ pub mod mair {
// Global instances
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// # Safety
///
/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0".
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new());
static MMU: MemoryManagementUnit = MemoryManagementUnit;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
/// Checks for architectural restrictions.
pub const fn arch_address_space_size_sanity_checks() {
// Size must be at least one full 512 MiB table.
assert!((AS_SIZE % Granule512MiB::SIZE) == 0);
// Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
// version.
assert!(AS_SIZE <= (1 << 48));
}
}
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
fn set_up_mair(&self) {
@ -87,19 +79,19 @@ impl MemoryManagementUnit {
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control(&self) {
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpaceSize::SHIFT) as u64;
let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::EPD1::DisableTTBR1Walks
TCR_EL1::TBI0::Used
+ TCR_EL1::IPS::Bits_40
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(t0sz),
+ TCR_EL1::A1::TTBR0
+ TCR_EL1::T0SZ.val(t0sz)
+ TCR_EL1::EPD1::DisableTTBR1Walks,
);
}
}
@ -108,11 +100,6 @@ impl MemoryManagementUnit {
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a guarded reference to the kernel's translation tables.
pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTable> {
&KERNEL_TABLES
}
/// Return a reference to the MMU instance.
pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
&MMU
@ -121,22 +108,29 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::MMUEnableError;
impl memory::mmu::interface::MMU for MemoryManagementUnit {
unsafe fn enable(
unsafe fn enable_mmu_and_caching(
&self,
kernel_table_phys_base_addr: Address<Physical>,
) -> Result<(), &'static str> {
// Fail early if translation granule is not supported. Both RPis support it, though.
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
return Err("Translation granule not supported in HW");
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
if unlikely(self.is_enabled()) {
return Err(MMUEnableError::AlreadyEnabled);
}
// Fail early if translation granule is not supported.
if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
return Err(MMUEnableError::Other(
"Translation granule not supported in HW",
));
}
// Prepare the memory attribute indirection register.
self.set_up_mair();
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(kernel_table_phys_base_addr.into_usize() as u64);
TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
self.configure_translation_control();
@ -153,23 +147,9 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit {
Ok(())
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// Check if KERNEL_TABLES is in .bss.
#[kernel_test]
fn kernel_tables_in_bss() {
let bss_range = bsp::memory::bss_range_inclusive();
let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64;
assert!(bss_range.contains(&kernel_tables_addr));
#[inline(always)]
fn is_enabled(&self) -> bool {
SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
}
}

@ -20,7 +20,7 @@ use crate::{
arch_mmu::{Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor,
},
Address, AddressType, Physical, Virtual,
Address, Physical, Virtual,
},
};
use core::convert;
@ -90,8 +90,8 @@ register_bitfields! {u64,
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
Reserved_Invalid = 0,
Page = 1
],
VALID OFFSET(0) NUMBITS(1) [
@ -119,19 +119,16 @@ struct PageDescriptor {
value: u64,
}
trait BaseAddr {
fn phys_base_addr(&self) -> Address<Physical>;
trait StartAddr {
fn phys_start_addr(&self) -> Address<Physical>;
}
const NUM_LVL2_TABLES: usize =
bsp::memory::mmu::KernelVirtAddrSpaceSize::SIZE >> Granule512MiB::SHIFT;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, hence the "reverse" order of appearance.
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
@ -148,16 +145,13 @@ pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
initialized: bool,
}
/// A translation table type for the kernel space.
pub type KernelTranslationTable = FixedSizeTranslationTable<NUM_LVL2_TABLES>;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> BaseAddr for [T; N] {
fn phys_base_addr(&self) -> Address<Physical> {
// The binary is still identity mapped, so we don't need to convert here.
// The binary is still identity mapped, so we don't need to convert here.
impl<T, const N: usize> StartAddr for [T; N] {
fn phys_start_addr(&self) -> Address<Physical> {
Address::new(self as *const _ as usize)
}
}
@ -171,14 +165,14 @@ impl TableDescriptor {
}
/// Create an instance pointing to the supplied address.
pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self {
pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);
let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT;
let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT;
val.write(
STAGE1_TABLE_DESCRIPTOR::VALID::True
STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
+ STAGE1_TABLE_DESCRIPTOR::VALID::True,
);
TableDescriptor { value: val.get() }
@ -232,18 +226,18 @@ impl PageDescriptor {
/// Create an instance.
pub fn from_output_addr(
output_addr: *const Page<Physical>,
phys_output_addr: *const Page<Physical>,
attribute_fields: &AttributeFields,
) -> Self {
let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);
let shifted = output_addr as u64 >> Granule64KiB::SHIFT;
let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
val.write(
STAGE1_PAGE_DESCRIPTOR::VALID::True
STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ attribute_fields.clone().into()
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted),
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Page
+ STAGE1_PAGE_DESCRIPTOR::VALID::True
+ attribute_fields.clone().into(),
);
Self { value: val.get() }
@ -260,6 +254,14 @@ impl PageDescriptor {
// Public Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AssociatedTranslationTable
for memory::mmu::AddressSpace<AS_SIZE>
where
[u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
type TableStartFromBottom = FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }>;
}
impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
// Reserve the last 256 MiB of the address space for MMIO mappings.
const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
@ -269,8 +271,9 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
#[allow(clippy::assertions_on_constants)]
pub const fn new() -> Self {
assert!(bsp::memory::mmu::KernelGranule::SIZE == Granule64KiB::SIZE);
// Can't have a zero-sized address space.
assert!(NUM_TABLES > 0);
assert!((bsp::memory::mmu::KernelVirtAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0);
Self {
lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
@ -282,7 +285,7 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// The start address of the table's MMIO range.
#[inline(always)]
const fn mmio_start_addr(&self) -> Address<Virtual> {
fn mmio_start_addr(&self) -> Address<Virtual> {
Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
@ -291,7 +294,7 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// The inclusive end address of the table's MMIO range.
#[inline(always)]
const fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (8191 << Granule64KiB::SHIFT)
@ -301,12 +304,13 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
/// Helper to calculate the lvl2 and lvl3 indices from an address.
#[inline(always)]
fn lvl2_lvl3_index_from<ATYPE: AddressType>(
fn lvl2_lvl3_index_from(
&self,
addr: *const Page<ATYPE>,
addr: *const Page<Virtual>,
) -> Result<(usize, usize), &'static str> {
let lvl2_index = addr as usize >> Granule512MiB::SHIFT;
let lvl3_index = (addr as usize & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
let addr = addr as usize;
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
if lvl2_index > (NUM_TABLES - 1) {
return Err("Virtual page is out of bounds of translation table");
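As a worked example of the index math (a sketch; the constants spell out `Granule512MiB` and `Granule64KiB` from above): virtual address `0x2008_0000` lies 512 MiB plus eight 64 KiB pages into the address space, so it resolves to lvl2 index 1 and lvl3 index 8.

```rust
// Sketch of the lvl2/lvl3 index derivation for the 64 KiB granule.
const GRANULE_512MIB_SHIFT: usize = 29; // log2(512 MiB)
const GRANULE_512MIB_MASK: usize = (1 << GRANULE_512MIB_SHIFT) - 1;
const GRANULE_64KIB_SHIFT: usize = 16; // log2(64 KiB)

fn lvl2_lvl3_index_from(virt_addr: usize) -> (usize, usize) {
    let lvl2_index = virt_addr >> GRANULE_512MIB_SHIFT;
    let lvl3_index = (virt_addr & GRANULE_512MIB_MASK) >> GRANULE_64KIB_SHIFT;
    (lvl2_index, lvl3_index)
}

fn main() {
    assert_eq!(lvl2_lvl3_index_from(0x2008_0000), (1, 8));
}
```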
@ -334,16 +338,15 @@ impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::TranslationTable
for FixedSizeTranslationTable<NUM_TABLES>
{
unsafe fn init(&mut self) {
fn init(&mut self) {
if self.initialized {
return;
}
// Populate the l2 entries.
for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() {
let desc = TableDescriptor::from_next_lvl_table_addr(
self.lvl3[lvl2_nr].phys_base_addr().into_usize(),
);
let desc =
TableDescriptor::from_next_lvl_table_addr(self.lvl3[lvl2_nr].phys_start_addr());
*lvl2_entry = desc;
}
@ -352,7 +355,7 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
}
fn phys_base_address(&self) -> Address<Physical> {
self.lvl2.phys_base_addr()
self.lvl2.phys_start_addr()
}
unsafe fn map_pages_at(
@ -366,15 +369,15 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
let p = phys_pages.as_slice();
let v = virt_pages.as_slice();
if p.len() != v.len() {
return Err("Tried to map page slices with unequal sizes");
}
// No work to do for empty slices.
if p.is_empty() {
if v.is_empty() {
return Ok(());
}
if v.len() != p.len() {
return Err("Tried to map page slices with unequal sizes");
}
if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page() {
return Err("Tried to map outside of physical address space");
}
@ -406,14 +409,13 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
return Err("Not enough MMIO space left");
}
let addr = (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT);
let addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
);
self.cur_l3_mmio_index += num_pages;
Ok(PageSliceDescriptor::from_addr(
Address::new(addr),
num_pages,
))
Ok(PageSliceDescriptor::from_addr(addr, num_pages))
}
fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool {
@ -435,7 +437,7 @@ impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::Transla
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
pub type MinSizeKernelTranslationTable = FixedSizeTranslationTable<1>;
pub type MinSizeTranslationTable = FixedSizeTranslationTable<1>;
#[cfg(test)]
mod tests {

@ -5,7 +5,7 @@
//! BSP console facilities.
use super::memory;
use crate::{bsp::device_driver, console, cpu};
use crate::{bsp::device_driver, console, cpu, driver};
use core::fmt;
//--------------------------------------------------------------------------------------------------
@ -23,7 +23,7 @@ use core::fmt;
///
/// - Use only for printing during a panic.
pub unsafe fn panic_console_out() -> impl fmt::Write {
use crate::driver::interface::DeviceDriver;
use driver::interface::DeviceDriver;
let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize());
let mut panic_uart =

@ -7,15 +7,23 @@
use crate::{
common,
memory::{
mmu as kernel_mmu,
mmu as generic_mmu,
mmu::{
AccessPermissions, AddressSpaceSize, AttributeFields, MemAttributes, Page,
PageSliceDescriptor, TranslationGranule,
AccessPermissions, AddressSpace, AssociatedTranslationTable, AttributeFields,
MemAttributes, Page, PageSliceDescriptor, TranslationGranule,
},
Physical, Virtual,
},
synchronization::InitStateLock,
};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
type KernelTranslationTable =
<KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromBottom;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
@ -24,8 +32,21 @@ use crate::{
/// derive respective data structures and their sizes. For example, the `crate::memory::mmu::Page`.
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
/// The address space size chosen by this BSP.
pub type KernelVirtAddrSpaceSize = AddressSpaceSize<{ 8 * 1024 * 1024 * 1024 }>;
/// The kernel's virtual address space defined by this BSP.
pub type KernelVirtAddrSpace = AddressSpace<{ 8 * 1024 * 1024 * 1024 }>;
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// It is mandatory that InitStateLock is transparent.
///
/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
/// There is a unit test that checks this property.
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new());
//--------------------------------------------------------------------------------------------------
// Private Code
@ -81,6 +102,11 @@ fn phys_data_page_desc() -> PageSliceDescriptor<Physical> {
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's translation tables.
pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTable> {
&KERNEL_TABLES
}
/// Pointer to the last page of the physical address space.
pub fn phys_addr_space_end_page() -> *const Page<Physical> {
common::align_down(
@ -95,7 +121,7 @@ pub fn phys_addr_space_end_page() -> *const Page<Physical> {
///
/// - Any miscalculation or attribute error will likely be fatal. Needs careful manual checking.
pub unsafe fn kernel_map_binary() -> Result<(), &'static str> {
kernel_mmu::kernel_map_pages_at(
generic_mmu::kernel_map_pages_at(
"Kernel boot-core stack",
&virt_stack_page_desc(),
&phys_stack_page_desc(),
@ -106,7 +132,7 @@ pub unsafe fn kernel_map_binary() -> Result<(), &'static str> {
},
)?;
kernel_mmu::kernel_map_pages_at(
generic_mmu::kernel_map_pages_at(
"Kernel code and RO data",
&virt_ro_page_desc(),
&phys_ro_page_desc(),
@ -117,7 +143,7 @@ pub unsafe fn kernel_map_binary() -> Result<(), &'static str> {
},
)?;
kernel_mmu::kernel_map_pages_at(
generic_mmu::kernel_map_pages_at(
"Kernel data and bss",
&virt_data_page_desc(),
&phys_data_page_desc(),
@ -157,18 +183,27 @@ mod tests {
#[kernel_test]
fn virt_mem_layout_has_no_overlaps() {
let layout = [
virt_stack_page_desc().into_usize_range_inclusive(),
virt_ro_page_desc().into_usize_range_inclusive(),
virt_data_page_desc().into_usize_range_inclusive(),
virt_stack_page_desc(),
virt_ro_page_desc(),
virt_data_page_desc(),
];
for (i, first_range) in layout.iter().enumerate() {
for second_range in layout.iter().skip(i + 1) {
assert!(!first_range.contains(second_range.start()));
assert!(!first_range.contains(second_range.end()));
assert!(!second_range.contains(first_range.start()));
assert!(!second_range.contains(first_range.end()));
assert!(!first_range.contains(second_range.start_addr()));
assert!(!first_range.contains(second_range.end_addr_inclusive()));
assert!(!second_range.contains(first_range.start_addr()));
assert!(!second_range.contains(first_range.end_addr_inclusive()));
}
}
}
/// Check if KERNEL_TABLES is in .bss.
#[kernel_test]
fn kernel_tables_in_bss() {
let bss_range = super::super::bss_range_inclusive();
let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64;
assert!(bss_range.contains(&kernel_tables_addr));
}
}

@ -112,6 +112,7 @@
#![allow(clippy::clippy::upper_case_acronyms)]
#![allow(incomplete_features)]
#![feature(asm)]
#![feature(const_evaluatable_checked)]
#![feature(const_fn)]
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_generics)]

@ -19,7 +19,7 @@ use libkernel::{bsp, cpu, driver, exception, info, memory, state, time, warn};
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Caching must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ IRQSafeNullLocks instead of spinlocks), will fail to
/// work on the RPi SoCs.
@ -29,10 +29,15 @@ unsafe fn kernel_init() -> ! {
exception::handling_init();
if let Err(string) = memory::mmu::kernel_map_binary_and_enable_mmu() {
panic!("Enabling MMU failed: {}", string);
let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() {
Err(string) => panic!("Error mapping kernel binary: {}", string),
Ok(addr) => addr,
};
if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) {
panic!("Enabling MMU failed: {}", e);
}
// Printing will silently fail fail from here on, because the driver's MMIO is not remapped yet.
// Printing will silently fail from here on, because the driver's MMIO is not remapped yet.
// Bring up the drivers needed for printing first.
for i in bsp::driver::driver_manager()

@ -7,7 +7,11 @@
pub mod mmu;
use crate::common;
use core::{marker::PhantomData, ops::RangeInclusive};
use core::{
fmt,
marker::PhantomData,
ops::{AddAssign, RangeInclusive, SubAssign},
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -74,6 +78,15 @@ impl<ATYPE: AddressType> core::ops::Add<usize> for Address<ATYPE> {
}
}
impl<ATYPE: AddressType> AddAssign for Address<ATYPE> {
fn add_assign(&mut self, other: Self) {
*self = Self {
value: self.value + other.into_usize(),
_address_type: PhantomData,
};
}
}
impl<ATYPE: AddressType> core::ops::Sub<usize> for Address<ATYPE> {
type Output = Self;
@ -85,6 +98,44 @@ impl<ATYPE: AddressType> core::ops::Sub<usize> for Address<ATYPE> {
}
}
impl<ATYPE: AddressType> SubAssign for Address<ATYPE> {
fn sub_assign(&mut self, other: Self) {
*self = Self {
value: self.value - other.into_usize(),
_address_type: PhantomData,
};
}
}
impl fmt::Display for Address<Physical> {
// Don't expect to see physical addresses wider than 40 bits.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let q3: u8 = ((self.value >> 32) & 0xff) as u8;
let q2: u16 = ((self.value >> 16) & 0xffff) as u16;
let q1: u16 = (self.value & 0xffff) as u16;
write!(f, "0x")?;
write!(f, "{:02x}_", q3)?;
write!(f, "{:04x}_", q2)?;
write!(f, "{:04x}", q1)
}
}
impl fmt::Display for Address<Virtual> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let q4: u16 = ((self.value >> 48) & 0xffff) as u16;
let q3: u16 = ((self.value >> 32) & 0xffff) as u16;
let q2: u16 = ((self.value >> 16) & 0xffff) as u16;
let q1: u16 = (self.value & 0xffff) as u16;
write!(f, "0x")?;
write!(f, "{:04x}_", q4)?;
write!(f, "{:04x}_", q3)?;
write!(f, "{:04x}_", q2)?;
write!(f, "{:04x}", q1)
}
}
/// Zero out an inclusive memory range.
///
/// # Safety
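The two `Display` impls added above group the hex digits to match how AArch64 addresses are usually read: at most 40 significant bits for physical addresses, up to 48 for virtual ones. A sketch of the resulting physical format, using a plain `u64` in place of `Address<Physical>`:

```rust
// Sketch of the physical-address formatting produced by the impl above.
fn fmt_phys(value: u64) -> String {
    format!(
        "0x{:02x}_{:04x}_{:04x}",
        (value >> 32) & 0xff,
        (value >> 16) & 0xffff,
        value & 0xffff
    )
}

fn main() {
    // RPi3 MMIO base, as an example input.
    assert_eq!(fmt_phys(0x3F20_0000), "0x00_3f20_0000");
}
```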

@ -17,6 +17,7 @@ use crate::{
memory::{Address, Physical, Virtual},
synchronization, warn,
};
use core::fmt;
pub use types::*;
@ -24,30 +25,48 @@ pub use types::*;
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
AlreadyEnabled,
Other(&'static str),
}
/// Memory Management interfaces.
pub mod interface {
use super::*;
/// MMU functions.
pub trait MMU {
/// Turns on the MMU.
/// Turns on the MMU for the first time and enables data and instruction caching.
///
/// # Safety
///
/// - Must only be called after the kernel translation tables have been init()'ed.
/// - Changes the HW's global state.
unsafe fn enable(
unsafe fn enable_mmu_and_caching(
&self,
kernel_table_phys_base_addr: Address<Physical>,
) -> Result<(), &'static str>;
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError>;
/// Returns true if the MMU is enabled, false otherwise.
fn is_enabled(&self) -> bool;
}
}
/// Describes the characteristics of a translation granule.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;
/// Describes the size of an address space.
pub struct AddressSpaceSize<const AS_SIZE: usize>;
/// Describes properties of an address space.
pub struct AddressSpace<const AS_SIZE: usize>;
/// Intended to be implemented for [`AddressSpace`].
pub trait AssociatedTranslationTable {
/// A translation table whose address range is:
///
/// [0, AS_SIZE - 1]
type TableStartFromBottom;
}
//--------------------------------------------------------------------------------------------------
// Private Code
@ -70,7 +89,7 @@ unsafe fn kernel_map_pages_at_unchecked(
phys_pages: &PageSliceDescriptor<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
arch_mmu::kernel_translation_tables()
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr))?;
if let Err(x) = mapping_record::kernel_add(name, virt_pages, phys_pages, attr) {
@ -84,6 +103,15 @@ unsafe fn kernel_map_pages_at_unchecked(
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
MMUEnableError::Other(x) => write!(f, "{}", x),
}
}
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
/// The granule's size.
pub const SIZE: usize = Self::size_checked();
@ -101,22 +129,18 @@ impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
}
}
impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
/// The address space size.
pub const SIZE: usize = Self::size_checked();
/// The address space shift, aka log2(size).
pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
const fn size_checked() -> usize {
assert!(AS_SIZE.is_power_of_two());
assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
// Must adhere to architectural restrictions.
assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
// Check for architectural restrictions as well.
Self::arch_address_space_size_sanity_checks();
AS_SIZE
}
@ -136,7 +160,7 @@ pub unsafe fn kernel_map_pages_at(
phys_pages: &PageSliceDescriptor<Physical>,
attr: &AttributeFields,
) -> Result<(), &'static str> {
let is_mmio = arch_mmu::kernel_translation_tables()
let is_mmio = bsp::memory::mmu::kernel_translation_tables()
.read(|tables| tables.is_virt_page_slice_mmio(virt_pages));
if is_mmio {
return Err("Attempt to manually map into MMIO region");
@ -169,8 +193,9 @@ pub unsafe fn kernel_map_mmio(
addr
// Otherwise, allocate a new virtual page slice and map it.
} else {
let virt_pages: PageSliceDescriptor<Virtual> = arch_mmu::kernel_translation_tables()
.write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
let virt_pages: PageSliceDescriptor<Virtual> =
bsp::memory::mmu::kernel_translation_tables()
.write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
kernel_map_pages_at_unchecked(
name,
@ -189,19 +214,32 @@ pub unsafe fn kernel_map_mmio(
Ok(virt_addr + offset_into_start_page)
}
/// Map the kernel's binary and enable the MMU.
/// Map the kernel's binary. Returns the translation table's base address.
///
/// # Safety
///
/// - Crucial function during kernel init. Changes the complete memory view of the processor.
pub unsafe fn kernel_map_binary_and_enable_mmu() -> Result<(), &'static str> {
let phys_base_addr = arch_mmu::kernel_translation_tables().write(|tables| {
tables.init();
tables.phys_base_address()
});
/// - See [`bsp::memory::mmu::kernel_map_binary()`].
pub unsafe fn kernel_map_binary() -> Result<Address<Physical>, &'static str> {
let phys_kernel_tables_base_addr =
bsp::memory::mmu::kernel_translation_tables().write(|tables| {
tables.init();
tables.phys_base_address()
});
bsp::memory::mmu::kernel_map_binary()?;
arch_mmu::mmu().enable(phys_base_addr)
Ok(phys_kernel_tables_base_addr)
}
/// Enable the MMU and data + instruction caching.
///
/// # Safety
///
/// - Crucial function during kernel init. Changes the complete memory view of the processor.
pub unsafe fn enable_mmu_and_caching(
phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Human-readable print of all recorded kernel mappings.

@ -111,12 +111,12 @@ impl MappingRecord {
const KIB_RSHIFT: u32 = 10; // log2(1024).
const MIB_RSHIFT: u32 = 20; // log2(1024 * 1024).
info!(" -----------------------------------------------------------------------------------------------------------------");
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
info!(
" {:^24} {:^24} {:^7} {:^9} {:^35}",
" {:^44} {:^30} {:^7} {:^9} {:^35}",
"Virtual", "Physical", "Size", "Attr", "Entity"
);
info!(" -----------------------------------------------------------------------------------------------------------------");
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
for i in self
.inner
@ -124,10 +124,10 @@ impl MappingRecord {
.filter(|x| x.is_some())
.map(|x| x.unwrap())
{
let virt_start = i.virt_start_addr.into_usize();
let virt_start = i.virt_start_addr;
let virt_end_inclusive = virt_start + i.phys_pages.size() - 1;
let phys_start = i.phys_pages.start_addr().into_usize();
let phys_end_inclusive = i.phys_pages.end_addr_inclusive().into_usize();
let phys_start = i.phys_pages.start_addr();
let phys_end_inclusive = i.phys_pages.end_addr_inclusive();
let size = i.phys_pages.size();
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
@ -155,7 +155,7 @@ impl MappingRecord {
};
info!(
" {:#011X}..{:#011X} --> {:#011X}..{:#011X} | \
" {}..{} --> {}..{} | \
{: >3} {} | {: <3} {} {: <2} | {}",
virt_start,
virt_end_inclusive,
@ -172,14 +172,14 @@ impl MappingRecord {
for k in i.users[1..].iter() {
if let Some(additional_user) = *k {
info!(
" | {}",
" | {}",
additional_user
);
}
}
}
info!(" -----------------------------------------------------------------------------------------------------------------");
info!(" -------------------------------------------------------------------------------------------------------------------------------------------");
}
}

@ -16,7 +16,8 @@ use crate::memory::{
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_translation_table::KernelTranslationTable;
#[cfg(target_arch = "aarch64")]
pub use arch_translation_table::FixedSizeTranslationTable;
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -34,7 +35,7 @@ pub mod interface {
///
/// - Implementor must ensure that this function can run only once or is harmless if invoked
/// multiple times.
unsafe fn init(&mut self);
fn init(&mut self);
/// The translation table's base address to be used for programming the MMU.
fn phys_base_address(&self) -> Address<Physical>;
@ -80,17 +81,17 @@ pub mod interface {
mod tests {
use super::*;
use crate::bsp;
use arch_translation_table::MinSizeKernelTranslationTable;
use arch_translation_table::MinSizeTranslationTable;
use interface::TranslationTable;
use test_macros::kernel_test;
/// Sanity checks for the kernel TranslationTable implementation.
/// Sanity checks for the TranslationTable implementation.
#[kernel_test]
fn translationtable_implementation_sanity() {
// Need to take care that `tables` fits into the stack.
let mut tables = MinSizeKernelTranslationTable::new();
// This will occupy a lot of space on the stack.
let mut tables = MinSizeTranslationTable::new();
unsafe { tables.init() };
tables.init();
let x = tables.next_mmio_virt_page_slice(0);
assert!(x.is_err());

@ -8,7 +8,7 @@ use crate::{
bsp, common,
memory::{Address, AddressType, Physical, Virtual},
};
use core::{convert::From, marker::PhantomData, ops::RangeInclusive};
use core::{convert::From, marker::PhantomData};
//--------------------------------------------------------------------------------------------------
// Public Definitions
@ -121,6 +121,11 @@ impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
self.start + (self.size() - 1)
}
/// Check if an address is contained within this descriptor.
pub fn contains(&self, addr: Address<ATYPE>) -> bool {
(addr >= self.start_addr()) && (addr <= self.end_addr_inclusive())
}
/// Return a non-mutable slice of Pages.
///
/// # Safety
@ -129,14 +134,6 @@ impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
pub unsafe fn as_slice(&self) -> &[Page<ATYPE>] {
core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages)
}
/// Return the inclusive address range of the slice.
pub fn into_usize_range_inclusive(self) -> RangeInclusive<usize> {
RangeInclusive::new(
self.start_addr().into_usize(),
self.end_addr_inclusive().into_usize(),
)
}
}
impl From<PageSliceDescriptor<Virtual>> for PageSliceDescriptor<Physical> {
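The new `contains()` helper uses inclusive bounds on both ends, which is exactly what the reworked overlap unit test relies on. A sketch of the semantics with plain integers:

```rust
// Sketch of contains() semantics: both bounds are inclusive.
fn contains(start: usize, end_inclusive: usize, addr: usize) -> bool {
    (addr >= start) && (addr <= end_inclusive)
}

fn main() {
    // A one-page 64 KiB slice starting at 0x1_0000 ends at 0x1_FFFF.
    assert!(contains(0x1_0000, 0x1_FFFF, 0x1_FFFF));
    assert!(!contains(0x1_0000, 0x1_FFFF, 0x2_0000));
}
```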

@ -139,3 +139,21 @@ impl<T> interface::ReadWriteEx for InitStateLock<T> {
f(data)
}
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
use test_macros::kernel_test;
/// InitStateLock must be transparent.
#[kernel_test]
fn init_state_lock_is_transparent() {
use core::mem::size_of;
assert_eq!(size_of::<InitStateLock<u64>>(), size_of::<u64>());
}
}

@ -29,11 +29,19 @@ unsafe fn kernel_init() -> ! {
println!("Testing synchronous exception handling by causing a page fault");
println!("-------------------------------------------------------------------\n");
if let Err(string) = memory::mmu::kernel_map_binary_and_enable_mmu() {
println!("Enabling MMU failed: {}", string);
let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() {
Err(string) => {
println!("Error mapping kernel binary: {}", string);
cpu::qemu_exit_failure()
}
Ok(addr) => addr,
};
if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) {
println!("Enabling MMU failed: {}", e);
cpu::qemu_exit_failure()
}
// Printing will silently fail fail from here on, because the driver's MMIO is not remapped yet.
// Printing will silently fail from here on, because the driver's MMIO is not remapped yet.
// Bring up the drivers needed for printing first.
for i in bsp::driver::driver_manager()
