From d09374710dda2221320ca5fc599db1cc412720f1 Mon Sep 17 00:00:00 2001 From: Andre Richter Date: Mon, 15 Mar 2021 22:07:01 +0100 Subject: [PATCH] Some rework on virtual memory code - Mostly more spearation of concerns in 15. - Cleanups in other parts. --- .../README.md | 194 +++-- .../src/_arch/aarch64/memory/mmu.rs | 60 +- .../aarch64/memory/mmu/translation_table.rs | 54 +- .../src/bsp/raspberrypi/memory/mmu.rs | 4 +- .../src/main.rs | 5 +- .../src/memory/mmu.rs | 39 +- 12_exceptions_part1_groundwork/README.md | 14 +- .../src/_arch/aarch64/memory/mmu.rs | 60 +- .../aarch64/memory/mmu/translation_table.rs | 54 +- .../src/bsp/raspberrypi/memory/mmu.rs | 4 +- 12_exceptions_part1_groundwork/src/main.rs | 5 +- .../src/memory/mmu.rs | 39 +- 13_integrated_testing/README.md | 24 +- .../src/_arch/aarch64/memory/mmu.rs | 60 +- .../aarch64/memory/mmu/translation_table.rs | 54 +- .../src/bsp/raspberrypi/memory/mmu.rs | 4 +- 13_integrated_testing/src/lib.rs | 1 + 13_integrated_testing/src/main.rs | 4 +- 13_integrated_testing/src/memory/mmu.rs | 39 +- .../tests/02_exception_sync_page_fault.rs | 2 +- 14_exceptions_part2_peripheral_IRQs/README.md | 38 +- .../src/_arch/aarch64/memory/mmu.rs | 60 +- .../aarch64/memory/mmu/translation_table.rs | 54 +- .../src/bsp/raspberrypi/memory/mmu.rs | 4 +- .../src/main.rs | 4 +- .../src/memory/mmu.rs | 39 +- .../src/synchronization.rs | 18 + .../tests/02_exception_sync_page_fault.rs | 2 +- 15_virtual_mem_part2_mmio_remap/README.md | 805 +++++++++++------- .../src/_arch/aarch64/memory/mmu.rs | 94 +- .../aarch64/memory/mmu/translation_table.rs | 100 +-- .../src/bsp/raspberrypi/console.rs | 4 +- .../src/bsp/raspberrypi/memory/mmu.rs | 65 +- 15_virtual_mem_part2_mmio_remap/src/lib.rs | 1 + 15_virtual_mem_part2_mmio_remap/src/main.rs | 13 +- 15_virtual_mem_part2_mmio_remap/src/memory.rs | 53 +- .../src/memory/mmu.rs | 92 +- .../src/memory/mmu/mapping_record.rs | 18 +- .../src/memory/mmu/translation_table.rs | 15 +- .../src/memory/mmu/types.rs | 15 +- .../src/synchronization.rs | 18 + .../tests/02_exception_sync_page_fault.rs | 14 +- 42 files changed, 1383 insertions(+), 863 deletions(-) diff --git a/11_virtual_mem_part1_identity_mapping/README.md b/11_virtual_mem_part1_identity_mapping/README.md index 7b822f68..270c8422 100644 --- a/11_virtual_mem_part1_identity_mapping/README.md +++ b/11_virtual_mem_part1_identity_mapping/README.md @@ -115,12 +115,12 @@ descriptors). In `translation_table.rs`, there is a definition of the actual translation table struct which is generic over the number of `LVL2` tables. The latter depends on the size of the target board's memory. Naturally, the `BSP` knows these details about the target board, and provides the size -through the constant `bsp::memory::mmu::KernelAddrSpaceSize::SIZE`. +through the constant `bsp::memory::mmu::KernelAddrSpace::SIZE`. This information is used by `translation_table.rs` to calculate the number of needed `LVL2` tables. Since one `LVL2` table in a `64 KiB` configuration covers `512 MiB`, all that needs to be done is to -divide `KernelAddrSpaceSize::SIZE` by `512 MiB` (there are several compile-time checks in place that -ensure that `KernelAddrSpaceSize` is a multiple of `512 MiB`). +divide `KernelAddrSpace::SIZE` by `512 MiB` (there are several compile-time checks in place that +ensure that `KernelAddrSpace::SIZE` is a multiple of `512 MiB`). The final table type is exported as `KernelTranslationTable`. 
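As a quick plausibility check of that calculation, here is a minimal, standalone sketch. It uses a hypothetical 8 GiB address space rather than the BSP's real `KernelAddrSpace` constant, and a plain division instead of the shift by `Granule512MiB::SHIFT` (equivalent for power-of-two sizes):

```rust
// Hypothetical numbers for illustration only; not the BSP's actual constants.
const SIZE_512MIB: usize = 512 * 1024 * 1024;
const HYPOTHETICAL_ADDR_SPACE_SIZE: usize = 8 * 1024 * 1024 * 1024; // 8 GiB

// Mirrors the NUM_LVL2_TABLES calculation: one LVL2 table per 512 MiB slice.
const NUM_LVL2_TABLES: usize = HYPOTHETICAL_ADDR_SPACE_SIZE / SIZE_512MIB;

fn main() {
    // 8 GiB / 512 MiB = 16 tables.
    assert_eq!(NUM_LVL2_TABLES, 16);
}
```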
Below is the respective excerpt from `translation_table.rs`: @@ -144,7 +144,7 @@ struct PageDescriptor { value: u64, } -const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; +const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; //-------------------------------------------------------------------------------------------------- // Public Definitions @@ -175,10 +175,6 @@ tables: //-------------------------------------------------------------------------------------------------- /// The kernel translation tables. -/// -/// # Safety -/// -/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0". static mut KERNEL_TABLES: KernelTranslationTable = KernelTranslationTable::new(); ``` @@ -213,10 +209,10 @@ Afterwards, the [Translation Table Base Register 0 - EL1] is set up with the bas `lvl2` tables and the [Translation Control Register - EL1] is configured: ```rust - // Set the "Translation Table Base Register". - TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); +// Set the "Translation Table Base Register". +TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); - self.configure_translation_control(); +self.configure_translation_control(); ``` Finally, the `MMU` is turned on through the [System Control Register - EL1]. The last step also @@ -297,7 +293,7 @@ unsafe fn kernel_init() -> ! { use driver::interface::DriverManager; use memory::mmu::interface::MMU; - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { panic!("MMU: {}", string); } ``` @@ -439,8 +435,8 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + AttrIndx OFFSET(2) NUMBITS(3) [], + + TYPE OFFSET(1) NUMBITS(1) [ -+ Block = 0, -+ Table = 1 ++ Reserved_Invalid = 0, ++ Page = 1 + ], + + VALID OFFSET(0) NUMBITS(1) [ @@ -468,19 +464,19 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + value: u64, +} + -+trait BaseAddr { -+ fn base_addr_u64(&self) -> u64; -+ fn base_addr_usize(&self) -> usize; ++trait StartAddr { ++ fn phys_start_addr_u64(&self) -> u64; ++ fn phys_start_addr_usize(&self) -> usize; +} + -+const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; ++const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; + +//-------------------------------------------------------------------------------------------------- +// Public Definitions +//-------------------------------------------------------------------------------------------------- + +/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB -+/// aligned, hence the "reverse" order of appearance. ++/// aligned, so the lvl3 is put first. +#[repr(C)] +#[repr(align(65536))] +pub struct FixedSizeTranslationTable { @@ -498,12 +494,13 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 +// Private Code +//-------------------------------------------------------------------------------------------------- + -+impl BaseAddr for [T; N] { -+ fn base_addr_u64(&self) -> u64 { ++// The binary is still identity mapped, so we don't need to convert here. 
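++// Under identity mapping, a virtual address and its physical address are the same value.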
++impl StartAddr for [T; N] { ++ fn phys_start_addr_u64(&self) -> u64 { + self as *const T as u64 + } + -+ fn base_addr_usize(&self) -> usize { ++ fn phys_start_addr_usize(&self) -> usize { + self as *const _ as usize + } +} @@ -517,14 +514,14 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + } + + /// Create an instance pointing to the supplied address. -+ pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self { ++ pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { + let val = InMemoryRegister::::new(0); + -+ let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT; ++ let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; + val.write( -+ STAGE1_TABLE_DESCRIPTOR::VALID::True ++ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + + STAGE1_TABLE_DESCRIPTOR::TYPE::Table -+ + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64), ++ + STAGE1_TABLE_DESCRIPTOR::VALID::True, + ); + + TableDescriptor { value: val.get() } @@ -577,16 +574,16 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + } + + /// Create an instance. -+ pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self { ++ pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { + let val = InMemoryRegister::::new(0); + -+ let shifted = output_addr as u64 >> Granule64KiB::SHIFT; ++ let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; + val.write( -+ STAGE1_PAGE_DESCRIPTOR::VALID::True ++ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + + STAGE1_PAGE_DESCRIPTOR::AF::True -+ + attribute_fields.into() -+ + STAGE1_PAGE_DESCRIPTOR::TYPE::Table -+ + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), ++ + STAGE1_PAGE_DESCRIPTOR::TYPE::Page ++ + STAGE1_PAGE_DESCRIPTOR::VALID::True ++ + attribute_fields.clone().into(), + ); + + Self { value: val.get() } @@ -599,10 +596,9 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + +impl FixedSizeTranslationTable { + /// Create an instance. -+ #[allow(clippy::assertions_on_constants)] + pub const fn new() -> Self { ++ // Can't have a zero-sized address space. 
+ assert!(NUM_TABLES > 0); -+ assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE modulo Granule512MiB::SIZE) == 0); + + Self { + lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -618,15 +614,15 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { + for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { + *l2_entry = -+ TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize()); ++ TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); + + for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { + let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); + -+ let (output_addr, attribute_fields) = ++ let (phys_output_addr, attribute_fields) = + bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; + -+ *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields); ++ *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields); + } + } + @@ -634,15 +630,15 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu/translation_table.rs 1 + } + + /// The translation table's base address to be used for programming the MMU. -+ pub fn base_address(&self) -> u64 { -+ self.lvl2.base_addr_u64() ++ pub fn phys_base_address(&self) -> u64 { ++ self.lvl2.phys_start_addr_u64() + } +} diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs --- 10_privilege_level/src/_arch/aarch64/memory/mmu.rs +++ 11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs -@@ -0,0 +1,146 @@ +@@ -0,0 +1,164 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2018-2021 Andre Richter @@ -662,6 +658,7 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part + bsp, memory, + memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule}, +}; ++use core::intrinsics::unlikely; +use cortex_a::{barrier, regs::*}; + +//-------------------------------------------------------------------------------------------------- @@ -678,15 +675,6 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part +pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; +pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; + -+/// The min supported address space size. -+pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB -+ -+/// The max supported address space size. -+pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB -+ -+/// The supported address space size granule. -+pub type AddrSpaceSizeGranule = Granule512MiB; -+ +/// Constants for indexing the MAIR_EL1. +#[allow(dead_code)] +pub mod mair { @@ -711,6 +699,18 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part +// Private Code +//-------------------------------------------------------------------------------------------------- + ++impl memory::mmu::AddressSpace { ++ /// Checks for architectural restrictions. ++ pub const fn arch_address_space_size_sanity_checks() { ++ // Size must be at least one full 512 MiB table. ++ assert!((AS_SIZE modulo Granule512MiB::SIZE) == 0); ++ ++ // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8 ++ // version. 
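++        // 2^48 bytes correspond to 256 TiB.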
++ assert!(AS_SIZE <= (1 << 48)); ++ } ++} ++ +impl MemoryManagementUnit { + /// Setup function for the MAIR_EL1 register. + fn set_up_mair(&self) { @@ -727,19 +727,19 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part + + /// Configure various settings of stage 1 of the EL1 translation regime. + fn configure_translation_control(&self) { -+ let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); -+ let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64; ++ let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64; + + TCR_EL1.write( -+ TCR_EL1::TBI0::Ignored -+ + TCR_EL1::IPS.val(ips) -+ + TCR_EL1::EPD1::DisableTTBR1Walks ++ TCR_EL1::TBI0::Used ++ + TCR_EL1::IPS::Bits_40 + + TCR_EL1::TG0::KiB_64 + + TCR_EL1::SH0::Inner + + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::EPD0::EnableTTBR0Walks -+ + TCR_EL1::T0SZ.val(t0sz), ++ + TCR_EL1::A1::TTBR0 ++ + TCR_EL1::T0SZ.val(t0sz) ++ + TCR_EL1::EPD1::DisableTTBR1Walks, + ); + } +} @@ -756,22 +756,31 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part +//------------------------------------------------------------------------------ +// OS Interface Code +//------------------------------------------------------------------------------ ++use memory::mmu::MMUEnableError; + +impl memory::mmu::interface::MMU for MemoryManagementUnit { -+ unsafe fn init(&self) -> Result<(), &'static str> { -+ // Fail early if translation granule is not supported. Both RPis support it, though. -+ if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { -+ return Err("Translation granule not supported in HW"); ++ unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> { ++ if unlikely(self.is_enabled()) { ++ return Err(MMUEnableError::AlreadyEnabled); ++ } ++ ++ // Fail early if translation granule is not supported. ++ if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) { ++ return Err(MMUEnableError::Other( ++ "Translation granule not supported in HW", ++ )); + } + + // Prepare the memory attribute indirection register. + self.set_up_mair(); + + // Populate translation tables. -+ KERNEL_TABLES.populate_tt_entries()?; ++ KERNEL_TABLES ++ .populate_tt_entries() ++ .map_err(|e| MMUEnableError::Other(e))?; + + // Set the "Translation Table Base Register". -+ TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); ++ TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); + + self.configure_translation_control(); + @@ -788,6 +797,11 @@ diff -uNr 10_privilege_level/src/_arch/aarch64/memory/mmu.rs 11_virtual_mem_part + + Ok(()) + } ++ ++ #[inline(always)] ++ fn is_enabled(&self) -> bool { ++ SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) ++ } +} diff -uNr 10_privilege_level/src/bsp/raspberrypi/link.ld 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/link.ld @@ -829,8 +843,8 @@ diff -uNr 10_privilege_level/src/bsp/raspberrypi/memory/mmu.rs 11_virtual_mem_pa +// Public Definitions +//-------------------------------------------------------------------------------------------------- + -+/// The address space size chosen by this BSP. -+pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; ++/// The kernel's address space defined by this BSP. 
++pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; + +const NUM_MEM_RANGES: usize = 3; + @@ -1006,7 +1020,7 @@ diff -uNr 10_privilege_level/src/bsp.rs 11_virtual_mem_part1_identity_mapping/sr diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/src/main.rs --- 10_privilege_level/src/main.rs +++ 11_virtual_mem_part1_identity_mapping/src/main.rs -@@ -108,7 +108,10 @@ +@@ -108,7 +108,11 @@ //! [`runtime_init::runtime_init()`]: runtime_init/fn.runtime_init.html #![allow(clippy::clippy::upper_case_acronyms)] @@ -1014,16 +1028,17 @@ diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/s #![feature(const_fn_fn_ptr_basics)] +#![feature(const_generics)] +#![feature(const_panic)] ++#![feature(core_intrinsics)] #![feature(format_args_nl)] #![feature(panic_info_message)] #![feature(trait_alias)] -@@ -132,9 +135,18 @@ +@@ -132,9 +136,18 @@ /// # Safety /// /// - Only a single core must be active and running this function. -/// - The init calls in this function must appear in the correct order. +/// - The init calls in this function must appear in the correct order: -+/// - Virtual memory must be activated before the device drivers. ++/// - Caching must be activated before the device drivers. +/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device +/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on +/// the RPi SoCs. @@ -1031,13 +1046,13 @@ diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/s use driver::interface::DriverManager; + use memory::mmu::interface::MMU; + -+ if let Err(string) = memory::mmu::mmu().init() { ++ if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { + panic!("MMU: {}", string); + } for i in bsp::driver::driver_manager().all_device_drivers().iter() { if let Err(x) = i.init() { -@@ -158,6 +170,9 @@ +@@ -158,6 +171,9 @@ info!("Booting on: {}", bsp::board_name()); @@ -1047,7 +1062,7 @@ diff -uNr 10_privilege_level/src/main.rs 11_virtual_mem_part1_identity_mapping/s let (_, privilege_level) = exception::current_privilege_level(); info!("Current privilege level: {}", privilege_level); -@@ -181,6 +196,13 @@ +@@ -181,6 +197,13 @@ info!("Timer test, spinning for 1 second"); time::time_manager().spin_for(Duration::from_secs(1)); @@ -1084,7 +1099,7 @@ diff -uNr 10_privilege_level/src/memory/mmu/translation_table.rs 11_virtual_mem_ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs --- 10_privilege_level/src/memory/mmu.rs +++ 11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs -@@ -0,0 +1,247 @@ +@@ -0,0 +1,264 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2020-2021 Andre Richter @@ -1118,8 +1133,17 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map +// Public Definitions +//-------------------------------------------------------------------------------------------------- + ++/// MMU enable errors variants. ++#[allow(missing_docs)] ++#[derive(Debug)] ++pub enum MMUEnableError { ++ AlreadyEnabled, ++ Other(&'static str), ++} ++ +/// Memory Management interfaces. +pub mod interface { ++ use super::*; + + /// MMU functions. + pub trait MMU { @@ -1129,15 +1153,18 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map + /// # Safety + /// + /// - Changes the HW's global state. 
-+ unsafe fn init(&self) -> Result<(), &'static str>; ++ unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>; ++ ++ /// Returns true if the MMU is enabled, false otherwise. ++ fn is_enabled(&self) -> bool; + } +} + +/// Describes the characteristics of a translation granule. +pub struct TranslationGranule; + -+/// Describes the size of an address space. -+pub struct AddressSpaceSize; ++/// Describes properties of an address space. ++pub struct AddressSpace; + +/// Architecture agnostic translation types. +#[allow(missing_docs)] @@ -1195,6 +1222,15 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map +// Public Code +//-------------------------------------------------------------------------------------------------- + ++impl fmt::Display for MMUEnableError { ++ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ++ match self { ++ MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"), ++ MMUEnableError::Other(x) => write!(f, "{}", x), ++ } ++ } ++} ++ +impl TranslationGranule { + /// The granule's size. + pub const SIZE: usize = Self::size_checked(); @@ -1209,22 +1245,18 @@ diff -uNr 10_privilege_level/src/memory/mmu.rs 11_virtual_mem_part1_identity_map + } +} + -+impl AddressSpaceSize { ++impl AddressSpace { + /// The address space size. + pub const SIZE: usize = Self::size_checked(); + + /// The address space shift, aka log2(size). -+ pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; ++ pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + + const fn size_checked() -> usize { + assert!(AS_SIZE.is_power_of_two()); -+ assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two()); -+ assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two()); + -+ // Must adhere to architectural restrictions. -+ assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE); -+ assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE); -+ assert!((AS_SIZE modulo arch_mmu::AddrSpaceSizeGranule::SIZE) == 0); ++ // Check for architectural restrictions as well. ++ Self::arch_address_space_size_sanity_checks(); + + AS_SIZE + } diff --git a/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs b/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs index 3504d257..2f7bf615 100644 --- a/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs +++ b/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu.rs @@ -17,6 +17,7 @@ use crate::{ bsp, memory, memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule}, }; +use core::intrinsics::unlikely; use cortex_a::{barrier, regs::*}; //-------------------------------------------------------------------------------------------------- @@ -33,15 +34,6 @@ struct MemoryManagementUnit; pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; -/// The min supported address space size. -pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB - -/// The max supported address space size. -pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB - -/// The supported address space size granule. -pub type AddrSpaceSizeGranule = Granule512MiB; - /// Constants for indexing the MAIR_EL1. 
#[allow(dead_code)] pub mod mair { @@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit; // Private Code //-------------------------------------------------------------------------------------------------- +impl memory::mmu::AddressSpace { + /// Checks for architectural restrictions. + pub const fn arch_address_space_size_sanity_checks() { + // Size must be at least one full 512 MiB table. + assert!((AS_SIZE % Granule512MiB::SIZE) == 0); + + // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8 + // version. + assert!(AS_SIZE <= (1 << 48)); + } +} + impl MemoryManagementUnit { /// Setup function for the MAIR_EL1 register. fn set_up_mair(&self) { @@ -82,19 +86,19 @@ impl MemoryManagementUnit { /// Configure various settings of stage 1 of the EL1 translation regime. fn configure_translation_control(&self) { - let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); - let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64; + let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64; TCR_EL1.write( - TCR_EL1::TBI0::Ignored - + TCR_EL1::IPS.val(ips) - + TCR_EL1::EPD1::DisableTTBR1Walks + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_40 + TCR_EL1::TG0::KiB_64 + TCR_EL1::SH0::Inner + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::T0SZ.val(t0sz), + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(t0sz) + + TCR_EL1::EPD1::DisableTTBR1Walks, ); } } @@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU { //------------------------------------------------------------------------------ // OS Interface Code //------------------------------------------------------------------------------ +use memory::mmu::MMUEnableError; impl memory::mmu::interface::MMU for MemoryManagementUnit { - unsafe fn init(&self) -> Result<(), &'static str> { - // Fail early if translation granule is not supported. Both RPis support it, though. - if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { - return Err("Translation granule not supported in HW"); + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> { + if unlikely(self.is_enabled()) { + return Err(MMUEnableError::AlreadyEnabled); + } + + // Fail early if translation granule is not supported. + if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) { + return Err(MMUEnableError::Other( + "Translation granule not supported in HW", + )); } // Prepare the memory attribute indirection register. self.set_up_mair(); // Populate translation tables. - KERNEL_TABLES.populate_tt_entries()?; + KERNEL_TABLES + .populate_tt_entries() + .map_err(|e| MMUEnableError::Other(e))?; // Set the "Translation Table Base Register". 
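        // TTBR0_EL1 takes the physical base address of the lvl2 tables.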
- TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); + TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); self.configure_translation_control(); @@ -143,4 +156,9 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit { Ok(()) } + + #[inline(always)] + fn is_enabled(&self) -> bool { + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) + } } diff --git a/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu/translation_table.rs b/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu/translation_table.rs index cbe1d783..f38d0895 100644 --- a/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu/translation_table.rs +++ b/11_virtual_mem_part1_identity_mapping/src/_arch/aarch64/memory/mmu/translation_table.rs @@ -87,8 +87,8 @@ register_bitfields! {u64, AttrIndx OFFSET(2) NUMBITS(3) [], TYPE OFFSET(1) NUMBITS(1) [ - Block = 0, - Table = 1 + Reserved_Invalid = 0, + Page = 1 ], VALID OFFSET(0) NUMBITS(1) [ @@ -116,19 +116,19 @@ struct PageDescriptor { value: u64, } -trait BaseAddr { - fn base_addr_u64(&self) -> u64; - fn base_addr_usize(&self) -> usize; +trait StartAddr { + fn phys_start_addr_u64(&self) -> u64; + fn phys_start_addr_usize(&self) -> usize; } -const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; +const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; //-------------------------------------------------------------------------------------------------- // Public Definitions //-------------------------------------------------------------------------------------------------- /// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB -/// aligned, hence the "reverse" order of appearance. +/// aligned, so the lvl3 is put first. #[repr(C)] #[repr(align(65536))] pub struct FixedSizeTranslationTable { @@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable; // Private Code //-------------------------------------------------------------------------------------------------- -impl BaseAddr for [T; N] { - fn base_addr_u64(&self) -> u64 { +// The binary is still identity mapped, so we don't need to convert here. +impl StartAddr for [T; N] { + fn phys_start_addr_u64(&self) -> u64 { self as *const T as u64 } - fn base_addr_usize(&self) -> usize { + fn phys_start_addr_usize(&self) -> usize { self as *const _ as usize } } @@ -165,14 +166,14 @@ impl TableDescriptor { } /// Create an instance pointing to the supplied address. - pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self { + pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { let val = InMemoryRegister::::new(0); - let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT; + let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; val.write( - STAGE1_TABLE_DESCRIPTOR::VALID::True + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + STAGE1_TABLE_DESCRIPTOR::TYPE::Table - + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64), + + STAGE1_TABLE_DESCRIPTOR::VALID::True, ); TableDescriptor { value: val.get() } @@ -225,16 +226,16 @@ impl PageDescriptor { } /// Create an instance. 
- pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self { + pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { let val = InMemoryRegister::::new(0); - let shifted = output_addr as u64 >> Granule64KiB::SHIFT; + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; val.write( - STAGE1_PAGE_DESCRIPTOR::VALID::True + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + STAGE1_PAGE_DESCRIPTOR::AF::True - + attribute_fields.into() - + STAGE1_PAGE_DESCRIPTOR::TYPE::Table - + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), + + STAGE1_PAGE_DESCRIPTOR::TYPE::Page + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + attribute_fields.clone().into(), ); Self { value: val.get() } @@ -247,10 +248,9 @@ impl PageDescriptor { impl FixedSizeTranslationTable { /// Create an instance. - #[allow(clippy::assertions_on_constants)] pub const fn new() -> Self { + // Can't have a zero-sized address space. assert!(NUM_TABLES > 0); - assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0); Self { lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -266,15 +266,15 @@ impl FixedSizeTranslationTable { pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { *l2_entry = - TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize()); + TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); - let (output_addr, attribute_fields) = + let (phys_output_addr, attribute_fields) = bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; - *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields); + *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields); } } @@ -282,7 +282,7 @@ impl FixedSizeTranslationTable { } /// The translation table's base address to be used for programming the MMU. - pub fn base_address(&self) -> u64 { - self.lvl2.base_addr_u64() + pub fn phys_base_address(&self) -> u64 { + self.lvl2.phys_start_addr_u64() } } diff --git a/11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.rs b/11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.rs index 911d1054..703bb3ba 100644 --- a/11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.rs +++ b/11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.rs @@ -12,8 +12,8 @@ use core::ops::RangeInclusive; // Public Definitions //-------------------------------------------------------------------------------------------------- -/// The address space size chosen by this BSP. -pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; +/// The kernel's address space defined by this BSP. 
+pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; const NUM_MEM_RANGES: usize = 3; diff --git a/11_virtual_mem_part1_identity_mapping/src/main.rs b/11_virtual_mem_part1_identity_mapping/src/main.rs index 9f755c9b..ccc842de 100644 --- a/11_virtual_mem_part1_identity_mapping/src/main.rs +++ b/11_virtual_mem_part1_identity_mapping/src/main.rs @@ -112,6 +112,7 @@ #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] #![feature(const_panic)] +#![feature(core_intrinsics)] #![feature(format_args_nl)] #![feature(panic_info_message)] #![feature(trait_alias)] @@ -136,7 +137,7 @@ mod time; /// /// - Only a single core must be active and running this function. /// - The init calls in this function must appear in the correct order: -/// - Virtual memory must be activated before the device drivers. +/// - Caching must be activated before the device drivers. /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device /// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on /// the RPi SoCs. @@ -144,7 +145,7 @@ unsafe fn kernel_init() -> ! { use driver::interface::DriverManager; use memory::mmu::interface::MMU; - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { panic!("MMU: {}", string); } diff --git a/11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs b/11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs index efc9c447..77cdcc06 100644 --- a/11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs +++ b/11_virtual_mem_part1_identity_mapping/src/memory/mmu.rs @@ -31,8 +31,17 @@ pub use arch_mmu::mmu; // Public Definitions //-------------------------------------------------------------------------------------------------- +/// MMU enable errors variants. +#[allow(missing_docs)] +#[derive(Debug)] +pub enum MMUEnableError { + AlreadyEnabled, + Other(&'static str), +} + /// Memory Management interfaces. pub mod interface { + use super::*; /// MMU functions. pub trait MMU { @@ -42,15 +51,18 @@ pub mod interface { /// # Safety /// /// - Changes the HW's global state. - unsafe fn init(&self) -> Result<(), &'static str>; + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>; + + /// Returns true if the MMU is enabled, false otherwise. + fn is_enabled(&self) -> bool; } } /// Describes the characteristics of a translation granule. pub struct TranslationGranule; -/// Describes the size of an address space. -pub struct AddressSpaceSize; +/// Describes properties of an address space. +pub struct AddressSpace; /// Architecture agnostic translation types. #[allow(missing_docs)] @@ -108,6 +120,15 @@ pub struct KernelVirtualLayout { // Public Code //-------------------------------------------------------------------------------------------------- +impl fmt::Display for MMUEnableError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"), + MMUEnableError::Other(x) => write!(f, "{}", x), + } + } +} + impl TranslationGranule { /// The granule's size. pub const SIZE: usize = Self::size_checked(); @@ -122,22 +143,18 @@ impl TranslationGranule { } } -impl AddressSpaceSize { +impl AddressSpace { /// The address space size. pub const SIZE: usize = Self::size_checked(); /// The address space shift, aka log2(size). 
- pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize; const fn size_checked() -> usize { assert!(AS_SIZE.is_power_of_two()); - assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two()); - assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two()); - // Must adhere to architectural restrictions. - assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE); - assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE); - assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0); + // Check for architectural restrictions as well. + Self::arch_address_space_size_sanity_checks(); AS_SIZE } diff --git a/12_exceptions_part1_groundwork/README.md b/12_exceptions_part1_groundwork/README.md index 53c953df..0ce07116 100644 --- a/12_exceptions_part1_groundwork/README.md +++ b/12_exceptions_part1_groundwork/README.md @@ -900,8 +900,8 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.r --- 11_virtual_mem_part1_identity_mapping/src/bsp/raspberrypi/memory/mmu.rs +++ 12_exceptions_part1_groundwork/src/bsp/raspberrypi/memory/mmu.rs @@ -15,7 +15,7 @@ - /// The address space size chosen by this BSP. - pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; + /// The kernel's address space defined by this BSP. + pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; -const NUM_MEM_RANGES: usize = 3; +const NUM_MEM_RANGES: usize = 2; @@ -967,24 +967,24 @@ diff -uNr 11_virtual_mem_part1_identity_mapping/src/exception.rs 12_exceptions_p diff -uNr 11_virtual_mem_part1_identity_mapping/src/main.rs 12_exceptions_part1_groundwork/src/main.rs --- 11_virtual_mem_part1_identity_mapping/src/main.rs +++ 12_exceptions_part1_groundwork/src/main.rs -@@ -113,6 +113,7 @@ - #![feature(const_generics)] +@@ -114,6 +114,7 @@ #![feature(const_panic)] + #![feature(core_intrinsics)] #![feature(format_args_nl)] +#![feature(global_asm)] #![feature(panic_info_message)] #![feature(trait_alias)] #![no_main] -@@ -144,6 +145,8 @@ +@@ -145,6 +146,8 @@ use driver::interface::DriverManager; use memory::mmu::interface::MMU; + exception::handling_init(); + - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { panic!("MMU: {}", string); } -@@ -196,13 +199,28 @@ +@@ -197,13 +200,28 @@ info!("Timer test, spinning for 1 second"); time::time_manager().spin_for(Duration::from_secs(1)); diff --git a/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs b/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs index 3504d257..2f7bf615 100644 --- a/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs +++ b/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs @@ -17,6 +17,7 @@ use crate::{ bsp, memory, memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule}, }; +use core::intrinsics::unlikely; use cortex_a::{barrier, regs::*}; //-------------------------------------------------------------------------------------------------- @@ -33,15 +34,6 @@ struct MemoryManagementUnit; pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; -/// The min supported address space size. -pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB - -/// The max supported address space size. -pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB - -/// The supported address space size granule. 
-pub type AddrSpaceSizeGranule = Granule512MiB; - /// Constants for indexing the MAIR_EL1. #[allow(dead_code)] pub mod mair { @@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit; // Private Code //-------------------------------------------------------------------------------------------------- +impl memory::mmu::AddressSpace { + /// Checks for architectural restrictions. + pub const fn arch_address_space_size_sanity_checks() { + // Size must be at least one full 512 MiB table. + assert!((AS_SIZE % Granule512MiB::SIZE) == 0); + + // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8 + // version. + assert!(AS_SIZE <= (1 << 48)); + } +} + impl MemoryManagementUnit { /// Setup function for the MAIR_EL1 register. fn set_up_mair(&self) { @@ -82,19 +86,19 @@ impl MemoryManagementUnit { /// Configure various settings of stage 1 of the EL1 translation regime. fn configure_translation_control(&self) { - let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); - let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64; + let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64; TCR_EL1.write( - TCR_EL1::TBI0::Ignored - + TCR_EL1::IPS.val(ips) - + TCR_EL1::EPD1::DisableTTBR1Walks + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_40 + TCR_EL1::TG0::KiB_64 + TCR_EL1::SH0::Inner + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::T0SZ.val(t0sz), + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(t0sz) + + TCR_EL1::EPD1::DisableTTBR1Walks, ); } } @@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU { //------------------------------------------------------------------------------ // OS Interface Code //------------------------------------------------------------------------------ +use memory::mmu::MMUEnableError; impl memory::mmu::interface::MMU for MemoryManagementUnit { - unsafe fn init(&self) -> Result<(), &'static str> { - // Fail early if translation granule is not supported. Both RPis support it, though. - if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { - return Err("Translation granule not supported in HW"); + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> { + if unlikely(self.is_enabled()) { + return Err(MMUEnableError::AlreadyEnabled); + } + + // Fail early if translation granule is not supported. + if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) { + return Err(MMUEnableError::Other( + "Translation granule not supported in HW", + )); } // Prepare the memory attribute indirection register. self.set_up_mair(); // Populate translation tables. - KERNEL_TABLES.populate_tt_entries()?; + KERNEL_TABLES + .populate_tt_entries() + .map_err(|e| MMUEnableError::Other(e))?; // Set the "Translation Table Base Register". 
- TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); + TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); self.configure_translation_control(); @@ -143,4 +156,9 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit { Ok(()) } + + #[inline(always)] + fn is_enabled(&self) -> bool { + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) + } } diff --git a/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translation_table.rs b/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translation_table.rs index cbe1d783..f38d0895 100644 --- a/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translation_table.rs +++ b/12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translation_table.rs @@ -87,8 +87,8 @@ register_bitfields! {u64, AttrIndx OFFSET(2) NUMBITS(3) [], TYPE OFFSET(1) NUMBITS(1) [ - Block = 0, - Table = 1 + Reserved_Invalid = 0, + Page = 1 ], VALID OFFSET(0) NUMBITS(1) [ @@ -116,19 +116,19 @@ struct PageDescriptor { value: u64, } -trait BaseAddr { - fn base_addr_u64(&self) -> u64; - fn base_addr_usize(&self) -> usize; +trait StartAddr { + fn phys_start_addr_u64(&self) -> u64; + fn phys_start_addr_usize(&self) -> usize; } -const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; +const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; //-------------------------------------------------------------------------------------------------- // Public Definitions //-------------------------------------------------------------------------------------------------- /// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB -/// aligned, hence the "reverse" order of appearance. +/// aligned, so the lvl3 is put first. #[repr(C)] #[repr(align(65536))] pub struct FixedSizeTranslationTable { @@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable; // Private Code //-------------------------------------------------------------------------------------------------- -impl BaseAddr for [T; N] { - fn base_addr_u64(&self) -> u64 { +// The binary is still identity mapped, so we don't need to convert here. +impl StartAddr for [T; N] { + fn phys_start_addr_u64(&self) -> u64 { self as *const T as u64 } - fn base_addr_usize(&self) -> usize { + fn phys_start_addr_usize(&self) -> usize { self as *const _ as usize } } @@ -165,14 +166,14 @@ impl TableDescriptor { } /// Create an instance pointing to the supplied address. - pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self { + pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { let val = InMemoryRegister::::new(0); - let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT; + let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; val.write( - STAGE1_TABLE_DESCRIPTOR::VALID::True + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + STAGE1_TABLE_DESCRIPTOR::TYPE::Table - + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64), + + STAGE1_TABLE_DESCRIPTOR::VALID::True, ); TableDescriptor { value: val.get() } @@ -225,16 +226,16 @@ impl PageDescriptor { } /// Create an instance. 
- pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self { + pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { let val = InMemoryRegister::::new(0); - let shifted = output_addr as u64 >> Granule64KiB::SHIFT; + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; val.write( - STAGE1_PAGE_DESCRIPTOR::VALID::True + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + STAGE1_PAGE_DESCRIPTOR::AF::True - + attribute_fields.into() - + STAGE1_PAGE_DESCRIPTOR::TYPE::Table - + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), + + STAGE1_PAGE_DESCRIPTOR::TYPE::Page + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + attribute_fields.clone().into(), ); Self { value: val.get() } @@ -247,10 +248,9 @@ impl PageDescriptor { impl FixedSizeTranslationTable { /// Create an instance. - #[allow(clippy::assertions_on_constants)] pub const fn new() -> Self { + // Can't have a zero-sized address space. assert!(NUM_TABLES > 0); - assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0); Self { lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -266,15 +266,15 @@ impl FixedSizeTranslationTable { pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { *l2_entry = - TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize()); + TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); - let (output_addr, attribute_fields) = + let (phys_output_addr, attribute_fields) = bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; - *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields); + *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields); } } @@ -282,7 +282,7 @@ impl FixedSizeTranslationTable { } /// The translation table's base address to be used for programming the MMU. - pub fn base_address(&self) -> u64 { - self.lvl2.base_addr_u64() + pub fn phys_base_address(&self) -> u64 { + self.lvl2.phys_start_addr_u64() } } diff --git a/12_exceptions_part1_groundwork/src/bsp/raspberrypi/memory/mmu.rs b/12_exceptions_part1_groundwork/src/bsp/raspberrypi/memory/mmu.rs index fe98604d..1775c07c 100644 --- a/12_exceptions_part1_groundwork/src/bsp/raspberrypi/memory/mmu.rs +++ b/12_exceptions_part1_groundwork/src/bsp/raspberrypi/memory/mmu.rs @@ -12,8 +12,8 @@ use core::ops::RangeInclusive; // Public Definitions //-------------------------------------------------------------------------------------------------- -/// The address space size chosen by this BSP. -pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; +/// The kernel's address space defined by this BSP. 
+pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; const NUM_MEM_RANGES: usize = 2; diff --git a/12_exceptions_part1_groundwork/src/main.rs b/12_exceptions_part1_groundwork/src/main.rs index 9dee3a44..8e8c089f 100644 --- a/12_exceptions_part1_groundwork/src/main.rs +++ b/12_exceptions_part1_groundwork/src/main.rs @@ -112,6 +112,7 @@ #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] #![feature(const_panic)] +#![feature(core_intrinsics)] #![feature(format_args_nl)] #![feature(global_asm)] #![feature(panic_info_message)] @@ -137,7 +138,7 @@ mod time; /// /// - Only a single core must be active and running this function. /// - The init calls in this function must appear in the correct order: -/// - Virtual memory must be activated before the device drivers. +/// - Caching must be activated before the device drivers. /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device /// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on /// the RPi SoCs. @@ -147,7 +148,7 @@ unsafe fn kernel_init() -> ! { exception::handling_init(); - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { panic!("MMU: {}", string); } diff --git a/12_exceptions_part1_groundwork/src/memory/mmu.rs b/12_exceptions_part1_groundwork/src/memory/mmu.rs index efc9c447..77cdcc06 100644 --- a/12_exceptions_part1_groundwork/src/memory/mmu.rs +++ b/12_exceptions_part1_groundwork/src/memory/mmu.rs @@ -31,8 +31,17 @@ pub use arch_mmu::mmu; // Public Definitions //-------------------------------------------------------------------------------------------------- +/// MMU enable errors variants. +#[allow(missing_docs)] +#[derive(Debug)] +pub enum MMUEnableError { + AlreadyEnabled, + Other(&'static str), +} + /// Memory Management interfaces. pub mod interface { + use super::*; /// MMU functions. pub trait MMU { @@ -42,15 +51,18 @@ pub mod interface { /// # Safety /// /// - Changes the HW's global state. - unsafe fn init(&self) -> Result<(), &'static str>; + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>; + + /// Returns true if the MMU is enabled, false otherwise. + fn is_enabled(&self) -> bool; } } /// Describes the characteristics of a translation granule. pub struct TranslationGranule; -/// Describes the size of an address space. -pub struct AddressSpaceSize; +/// Describes properties of an address space. +pub struct AddressSpace; /// Architecture agnostic translation types. #[allow(missing_docs)] @@ -108,6 +120,15 @@ pub struct KernelVirtualLayout { // Public Code //-------------------------------------------------------------------------------------------------- +impl fmt::Display for MMUEnableError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"), + MMUEnableError::Other(x) => write!(f, "{}", x), + } + } +} + impl TranslationGranule { /// The granule's size. pub const SIZE: usize = Self::size_checked(); @@ -122,22 +143,18 @@ impl TranslationGranule { } } -impl AddressSpaceSize { +impl AddressSpace { /// The address space size. pub const SIZE: usize = Self::size_checked(); /// The address space shift, aka log2(size). 
- pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize; const fn size_checked() -> usize { assert!(AS_SIZE.is_power_of_two()); - assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two()); - assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two()); - // Must adhere to architectural restrictions. - assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE); - assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE); - assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0); + // Check for architectural restrictions as well. + Self::arch_address_space_size_sanity_checks(); AS_SIZE } diff --git a/13_integrated_testing/README.md b/13_integrated_testing/README.md index c0fc217b..452c864e 100644 --- a/13_integrated_testing/README.md +++ b/13_integrated_testing/README.md @@ -1026,7 +1026,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translatio --- 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translation_table.rs +++ 13_integrated_testing/src/_arch/aarch64/memory/mmu/translation_table.rs @@ -286,3 +286,31 @@ - self.lvl2.base_addr_u64() + self.lvl2.phys_start_addr_u64() } } + @@ -1061,8 +1061,8 @@ diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu/translatio diff -uNr 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs 13_integrated_testing/src/_arch/aarch64/memory/mmu.rs --- 12_exceptions_part1_groundwork/src/_arch/aarch64/memory/mmu.rs +++ 13_integrated_testing/src/_arch/aarch64/memory/mmu.rs -@@ -144,3 +144,22 @@ - Ok(()) +@@ -162,3 +162,22 @@ + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) } } + @@ -1194,7 +1194,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/exception.rs 13_integrated_testing/ diff -uNr 12_exceptions_part1_groundwork/src/lib.rs 13_integrated_testing/src/lib.rs --- 12_exceptions_part1_groundwork/src/lib.rs +++ 13_integrated_testing/src/lib.rs -@@ -0,0 +1,171 @@ +@@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2018-2021 Andre Richter @@ -1311,6 +1311,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/lib.rs 13_integrated_testing/src/li +#![feature(const_fn_fn_ptr_basics)] +#![feature(const_generics)] +#![feature(const_panic)] ++#![feature(core_intrinsics)] +#![feature(format_args_nl)] +#![feature(global_asm)] +#![feature(linkage)] @@ -1370,7 +1371,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/lib.rs 13_integrated_testing/src/li diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/main.rs --- 12_exceptions_part1_groundwork/src/main.rs +++ 13_integrated_testing/src/main.rs -@@ -6,130 +6,12 @@ +@@ -6,131 +6,12 @@ #![doc(html_logo_url = "https://git.io/JeGIp")] //! The `kernel` binary. @@ -1480,6 +1481,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m -#![feature(const_fn_fn_ptr_basics)] -#![feature(const_generics)] -#![feature(const_panic)] +-#![feature(core_intrinsics)] + #![feature(format_args_nl)] -#![feature(global_asm)] @@ -1503,7 +1505,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m /// Early init code. /// -@@ -141,6 +23,7 @@ +@@ -142,6 +23,7 @@ /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device /// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on /// the RPi SoCs. @@ -1511,7 +1513,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m unsafe fn kernel_init() -> ! 
{ use driver::interface::DriverManager; use memory::mmu::interface::MMU; -@@ -167,9 +50,7 @@ +@@ -168,9 +50,7 @@ fn kernel_main() -> ! { use bsp::console::console; use console::interface::All; @@ -1521,7 +1523,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m info!("Booting on: {}", bsp::board_name()); -@@ -196,31 +77,6 @@ +@@ -197,31 +77,6 @@ info!(" {}. {}", i + 1, driver.compatible()); } @@ -1557,7 +1559,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/main.rs 13_integrated_testing/src/m diff -uNr 12_exceptions_part1_groundwork/src/memory/mmu.rs 13_integrated_testing/src/memory/mmu.rs --- 12_exceptions_part1_groundwork/src/memory/mmu.rs +++ 13_integrated_testing/src/memory/mmu.rs -@@ -54,7 +54,6 @@ +@@ -66,7 +66,6 @@ /// Architecture agnostic translation types. #[allow(missing_docs)] @@ -1565,7 +1567,7 @@ diff -uNr 12_exceptions_part1_groundwork/src/memory/mmu.rs 13_integrated_testing #[derive(Copy, Clone)] pub enum Translation { Identity, -@@ -244,4 +243,9 @@ +@@ -261,4 +260,9 @@ info!("{}", i); } } @@ -1910,7 +1912,7 @@ diff -uNr 12_exceptions_part1_groundwork/tests/02_exception_sync_page_fault.rs 1 + println!("Testing synchronous exception handling by causing a page fault"); + println!("-------------------------------------------------------------------\n"); + -+ if let Err(string) = memory::mmu::mmu().init() { ++ if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { + println!("MMU: {}", string); + cpu::qemu_exit_failure() + } diff --git a/13_integrated_testing/src/_arch/aarch64/memory/mmu.rs b/13_integrated_testing/src/_arch/aarch64/memory/mmu.rs index 42ba0519..29e8125d 100644 --- a/13_integrated_testing/src/_arch/aarch64/memory/mmu.rs +++ b/13_integrated_testing/src/_arch/aarch64/memory/mmu.rs @@ -17,6 +17,7 @@ use crate::{ bsp, memory, memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule}, }; +use core::intrinsics::unlikely; use cortex_a::{barrier, regs::*}; //-------------------------------------------------------------------------------------------------- @@ -33,15 +34,6 @@ struct MemoryManagementUnit; pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; -/// The min supported address space size. -pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB - -/// The max supported address space size. -pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB - -/// The supported address space size granule. -pub type AddrSpaceSizeGranule = Granule512MiB; - /// Constants for indexing the MAIR_EL1. #[allow(dead_code)] pub mod mair { @@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit; // Private Code //-------------------------------------------------------------------------------------------------- +impl memory::mmu::AddressSpace { + /// Checks for architectural restrictions. + pub const fn arch_address_space_size_sanity_checks() { + // Size must be at least one full 512 MiB table. + assert!((AS_SIZE % Granule512MiB::SIZE) == 0); + + // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8 + // version. + assert!(AS_SIZE <= (1 << 48)); + } +} + impl MemoryManagementUnit { /// Setup function for the MAIR_EL1 register. fn set_up_mair(&self) { @@ -82,19 +86,19 @@ impl MemoryManagementUnit { /// Configure various settings of stage 1 of the EL1 translation regime. 
fn configure_translation_control(&self) { - let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); - let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64; + let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64; TCR_EL1.write( - TCR_EL1::TBI0::Ignored - + TCR_EL1::IPS.val(ips) - + TCR_EL1::EPD1::DisableTTBR1Walks + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_40 + TCR_EL1::TG0::KiB_64 + TCR_EL1::SH0::Inner + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::T0SZ.val(t0sz), + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(t0sz) + + TCR_EL1::EPD1::DisableTTBR1Walks, ); } } @@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU { //------------------------------------------------------------------------------ // OS Interface Code //------------------------------------------------------------------------------ +use memory::mmu::MMUEnableError; impl memory::mmu::interface::MMU for MemoryManagementUnit { - unsafe fn init(&self) -> Result<(), &'static str> { - // Fail early if translation granule is not supported. Both RPis support it, though. - if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { - return Err("Translation granule not supported in HW"); + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> { + if unlikely(self.is_enabled()) { + return Err(MMUEnableError::AlreadyEnabled); + } + + // Fail early if translation granule is not supported. + if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) { + return Err(MMUEnableError::Other( + "Translation granule not supported in HW", + )); } // Prepare the memory attribute indirection register. self.set_up_mair(); // Populate translation tables. - KERNEL_TABLES.populate_tt_entries()?; + KERNEL_TABLES + .populate_tt_entries() + .map_err(|e| MMUEnableError::Other(e))?; // Set the "Translation Table Base Register". - TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); + TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); self.configure_translation_control(); @@ -143,6 +156,11 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit { Ok(()) } + + #[inline(always)] + fn is_enabled(&self) -> bool { + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) + } } //-------------------------------------------------------------------------------------------------- diff --git a/13_integrated_testing/src/_arch/aarch64/memory/mmu/translation_table.rs b/13_integrated_testing/src/_arch/aarch64/memory/mmu/translation_table.rs index 337f9aed..73a93ff7 100644 --- a/13_integrated_testing/src/_arch/aarch64/memory/mmu/translation_table.rs +++ b/13_integrated_testing/src/_arch/aarch64/memory/mmu/translation_table.rs @@ -87,8 +87,8 @@ register_bitfields! 
{u64, AttrIndx OFFSET(2) NUMBITS(3) [], TYPE OFFSET(1) NUMBITS(1) [ - Block = 0, - Table = 1 + Reserved_Invalid = 0, + Page = 1 ], VALID OFFSET(0) NUMBITS(1) [ @@ -116,19 +116,19 @@ struct PageDescriptor { value: u64, } -trait BaseAddr { - fn base_addr_u64(&self) -> u64; - fn base_addr_usize(&self) -> usize; +trait StartAddr { + fn phys_start_addr_u64(&self) -> u64; + fn phys_start_addr_usize(&self) -> usize; } -const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; +const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; //-------------------------------------------------------------------------------------------------- // Public Definitions //-------------------------------------------------------------------------------------------------- /// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB -/// aligned, hence the "reverse" order of appearance. +/// aligned, so the lvl3 is put first. #[repr(C)] #[repr(align(65536))] pub struct FixedSizeTranslationTable { @@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable; // Private Code //-------------------------------------------------------------------------------------------------- -impl BaseAddr for [T; N] { - fn base_addr_u64(&self) -> u64 { +// The binary is still identity mapped, so we don't need to convert here. +impl StartAddr for [T; N] { + fn phys_start_addr_u64(&self) -> u64 { self as *const T as u64 } - fn base_addr_usize(&self) -> usize { + fn phys_start_addr_usize(&self) -> usize { self as *const _ as usize } } @@ -165,14 +166,14 @@ impl TableDescriptor { } /// Create an instance pointing to the supplied address. - pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self { + pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { let val = InMemoryRegister::::new(0); - let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT; + let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; val.write( - STAGE1_TABLE_DESCRIPTOR::VALID::True + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + STAGE1_TABLE_DESCRIPTOR::TYPE::Table - + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64), + + STAGE1_TABLE_DESCRIPTOR::VALID::True, ); TableDescriptor { value: val.get() } @@ -225,16 +226,16 @@ impl PageDescriptor { } /// Create an instance. - pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self { + pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { let val = InMemoryRegister::::new(0); - let shifted = output_addr as u64 >> Granule64KiB::SHIFT; + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; val.write( - STAGE1_PAGE_DESCRIPTOR::VALID::True + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + STAGE1_PAGE_DESCRIPTOR::AF::True - + attribute_fields.into() - + STAGE1_PAGE_DESCRIPTOR::TYPE::Table - + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), + + STAGE1_PAGE_DESCRIPTOR::TYPE::Page + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + attribute_fields.clone().into(), ); Self { value: val.get() } @@ -247,10 +248,9 @@ impl PageDescriptor { impl FixedSizeTranslationTable { /// Create an instance. - #[allow(clippy::assertions_on_constants)] pub const fn new() -> Self { + // Can't have a zero-sized address space. 
assert!(NUM_TABLES > 0); - assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0); Self { lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -266,15 +266,15 @@ impl FixedSizeTranslationTable { pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { *l2_entry = - TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize()); + TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); - let (output_addr, attribute_fields) = + let (phys_output_addr, attribute_fields) = bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; - *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields); + *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields); } } @@ -282,8 +282,8 @@ impl FixedSizeTranslationTable { } /// The translation table's base address to be used for programming the MMU. - pub fn base_address(&self) -> u64 { - self.lvl2.base_addr_u64() + pub fn phys_base_address(&self) -> u64 { + self.lvl2.phys_start_addr_u64() } } diff --git a/13_integrated_testing/src/bsp/raspberrypi/memory/mmu.rs b/13_integrated_testing/src/bsp/raspberrypi/memory/mmu.rs index 451d927b..0ccfae00 100644 --- a/13_integrated_testing/src/bsp/raspberrypi/memory/mmu.rs +++ b/13_integrated_testing/src/bsp/raspberrypi/memory/mmu.rs @@ -12,8 +12,8 @@ use core::ops::RangeInclusive; // Public Definitions //-------------------------------------------------------------------------------------------------- -/// The address space size chosen by this BSP. -pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; +/// The kernel's address space defined by this BSP. +pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; const NUM_MEM_RANGES: usize = 2; diff --git a/13_integrated_testing/src/lib.rs b/13_integrated_testing/src/lib.rs index 0fa216a9..b094daf3 100644 --- a/13_integrated_testing/src/lib.rs +++ b/13_integrated_testing/src/lib.rs @@ -114,6 +114,7 @@ #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] #![feature(const_panic)] +#![feature(core_intrinsics)] #![feature(format_args_nl)] #![feature(global_asm)] #![feature(linkage)] diff --git a/13_integrated_testing/src/main.rs b/13_integrated_testing/src/main.rs index 769f9e8f..101124df 100644 --- a/13_integrated_testing/src/main.rs +++ b/13_integrated_testing/src/main.rs @@ -19,7 +19,7 @@ use libkernel::{bsp, console, driver, exception, info, memory, time}; /// /// - Only a single core must be active and running this function. /// - The init calls in this function must appear in the correct order: -/// - Virtual memory must be activated before the device drivers. +/// - Caching must be activated before the device drivers. /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device /// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on /// the RPi SoCs. @@ -30,7 +30,7 @@ unsafe fn kernel_init() -> ! 
{ exception::handling_init(); - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { panic!("MMU: {}", string); } diff --git a/13_integrated_testing/src/memory/mmu.rs b/13_integrated_testing/src/memory/mmu.rs index cca2951a..ec4ca074 100644 --- a/13_integrated_testing/src/memory/mmu.rs +++ b/13_integrated_testing/src/memory/mmu.rs @@ -31,8 +31,17 @@ pub use arch_mmu::mmu; // Public Definitions //-------------------------------------------------------------------------------------------------- +/// MMU enable errors variants. +#[allow(missing_docs)] +#[derive(Debug)] +pub enum MMUEnableError { + AlreadyEnabled, + Other(&'static str), +} + /// Memory Management interfaces. pub mod interface { + use super::*; /// MMU functions. pub trait MMU { @@ -42,15 +51,18 @@ pub mod interface { /// # Safety /// /// - Changes the HW's global state. - unsafe fn init(&self) -> Result<(), &'static str>; + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>; + + /// Returns true if the MMU is enabled, false otherwise. + fn is_enabled(&self) -> bool; } } /// Describes the characteristics of a translation granule. pub struct TranslationGranule; -/// Describes the size of an address space. -pub struct AddressSpaceSize; +/// Describes properties of an address space. +pub struct AddressSpace; /// Architecture agnostic translation types. #[allow(missing_docs)] @@ -107,6 +119,15 @@ pub struct KernelVirtualLayout { // Public Code //-------------------------------------------------------------------------------------------------- +impl fmt::Display for MMUEnableError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"), + MMUEnableError::Other(x) => write!(f, "{}", x), + } + } +} + impl TranslationGranule { /// The granule's size. pub const SIZE: usize = Self::size_checked(); @@ -121,22 +142,18 @@ impl TranslationGranule { } } -impl AddressSpaceSize { +impl AddressSpace { /// The address space size. pub const SIZE: usize = Self::size_checked(); /// The address space shift, aka log2(size). - pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize; const fn size_checked() -> usize { assert!(AS_SIZE.is_power_of_two()); - assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two()); - assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two()); - // Must adhere to architectural restrictions. - assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE); - assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE); - assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0); + // Check for architectural restrictions as well. + Self::arch_address_space_size_sanity_checks(); AS_SIZE } diff --git a/13_integrated_testing/tests/02_exception_sync_page_fault.rs b/13_integrated_testing/tests/02_exception_sync_page_fault.rs index 185089a3..f1535d34 100644 --- a/13_integrated_testing/tests/02_exception_sync_page_fault.rs +++ b/13_integrated_testing/tests/02_exception_sync_page_fault.rs @@ -29,7 +29,7 @@ unsafe fn kernel_init() -> ! 
{ println!("Testing synchronous exception handling by causing a page fault"); println!("-------------------------------------------------------------------\n"); - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { println!("MMU: {}", string); cpu::qemu_exit_failure() } diff --git a/14_exceptions_part2_peripheral_IRQs/README.md b/14_exceptions_part2_peripheral_IRQs/README.md index 1c3cb7f0..5e1f45e3 100644 --- a/14_exceptions_part2_peripheral_IRQs/README.md +++ b/14_exceptions_part2_peripheral_IRQs/README.md @@ -2296,7 +2296,7 @@ diff -uNr 13_integrated_testing/src/exception/asynchronous.rs 14_exceptions_part diff -uNr 13_integrated_testing/src/lib.rs 14_exceptions_part2_peripheral_IRQs/src/lib.rs --- 13_integrated_testing/src/lib.rs +++ 14_exceptions_part2_peripheral_IRQs/src/lib.rs -@@ -111,9 +111,11 @@ +@@ -111,6 +111,7 @@ #![allow(clippy::clippy::upper_case_acronyms)] #![allow(incomplete_features)] @@ -2304,11 +2304,7 @@ diff -uNr 13_integrated_testing/src/lib.rs 14_exceptions_part2_peripheral_IRQs/s #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] #![feature(const_panic)] -+#![feature(core_intrinsics)] - #![feature(format_args_nl)] - #![feature(global_asm)] - #![feature(linkage)] -@@ -137,6 +139,7 @@ +@@ -138,6 +139,7 @@ pub mod exception; pub mod memory; pub mod print; @@ -2331,7 +2327,7 @@ diff -uNr 13_integrated_testing/src/main.rs 14_exceptions_part2_peripheral_IRQs/ /// @@ -21,8 +21,8 @@ /// - The init calls in this function must appear in the correct order: - /// - Virtual memory must be activated before the device drivers. + /// - Caching must be activated before the device drivers. /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device -/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on -/// the RPi SoCs. @@ -2590,7 +2586,7 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip type Data = T; fn lock(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R { -@@ -72,6 +110,32 @@ +@@ -72,6 +110,50 @@ // mutable reference will ever only be given out once at a time. let data = unsafe { &mut *self.data.get() }; @@ -2614,14 +2610,32 @@ diff -uNr 13_integrated_testing/src/synchronization.rs 14_exceptions_part2_perip + + let data = unsafe { &mut *self.data.get() }; + -+ f(data) -+ } + f(data) + } + + fn read(&self, f: impl FnOnce(&Self::Data) -> R) -> R { + let data = unsafe { &*self.data.get() }; + - f(data) - } ++ f(data) ++ } ++} ++ ++//-------------------------------------------------------------------------------------------------- ++// Testing ++//-------------------------------------------------------------------------------------------------- ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ use test_macros::kernel_test; ++ ++ /// InitStateLock must be transparent. 
++ #[kernel_test] ++ fn init_state_lock_is_transparent() { ++ use core::mem::size_of; ++ ++ assert_eq!(size_of::>(), size_of::()); ++ } } diff -uNr 13_integrated_testing/tests/03_exception_irq_sanity.rs 14_exceptions_part2_peripheral_IRQs/tests/03_exception_irq_sanity.rs diff --git a/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs b/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs index 42ba0519..29e8125d 100644 --- a/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs +++ b/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs @@ -17,6 +17,7 @@ use crate::{ bsp, memory, memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule}, }; +use core::intrinsics::unlikely; use cortex_a::{barrier, regs::*}; //-------------------------------------------------------------------------------------------------- @@ -33,15 +34,6 @@ struct MemoryManagementUnit; pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; -/// The min supported address space size. -pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB - -/// The max supported address space size. -pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB - -/// The supported address space size granule. -pub type AddrSpaceSizeGranule = Granule512MiB; - /// Constants for indexing the MAIR_EL1. #[allow(dead_code)] pub mod mair { @@ -66,6 +58,18 @@ static MMU: MemoryManagementUnit = MemoryManagementUnit; // Private Code //-------------------------------------------------------------------------------------------------- +impl memory::mmu::AddressSpace { + /// Checks for architectural restrictions. + pub const fn arch_address_space_size_sanity_checks() { + // Size must be at least one full 512 MiB table. + assert!((AS_SIZE % Granule512MiB::SIZE) == 0); + + // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8 + // version. + assert!(AS_SIZE <= (1 << 48)); + } +} + impl MemoryManagementUnit { /// Setup function for the MAIR_EL1 register. fn set_up_mair(&self) { @@ -82,19 +86,19 @@ impl MemoryManagementUnit { /// Configure various settings of stage 1 of the EL1 translation regime. fn configure_translation_control(&self) { - let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); - let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64; + let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64; TCR_EL1.write( - TCR_EL1::TBI0::Ignored - + TCR_EL1::IPS.val(ips) - + TCR_EL1::EPD1::DisableTTBR1Walks + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_40 + TCR_EL1::TG0::KiB_64 + TCR_EL1::SH0::Inner + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::T0SZ.val(t0sz), + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(t0sz) + + TCR_EL1::EPD1::DisableTTBR1Walks, ); } } @@ -111,22 +115,31 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU { //------------------------------------------------------------------------------ // OS Interface Code //------------------------------------------------------------------------------ +use memory::mmu::MMUEnableError; impl memory::mmu::interface::MMU for MemoryManagementUnit { - unsafe fn init(&self) -> Result<(), &'static str> { - // Fail early if translation granule is not supported. Both RPis support it, though. 
- if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { - return Err("Translation granule not supported in HW"); + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> { + if unlikely(self.is_enabled()) { + return Err(MMUEnableError::AlreadyEnabled); + } + + // Fail early if translation granule is not supported. + if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) { + return Err(MMUEnableError::Other( + "Translation granule not supported in HW", + )); } // Prepare the memory attribute indirection register. self.set_up_mair(); // Populate translation tables. - KERNEL_TABLES.populate_tt_entries()?; + KERNEL_TABLES + .populate_tt_entries() + .map_err(|e| MMUEnableError::Other(e))?; // Set the "Translation Table Base Register". - TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); + TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); self.configure_translation_control(); @@ -143,6 +156,11 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit { Ok(()) } + + #[inline(always)] + fn is_enabled(&self) -> bool { + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) + } } //-------------------------------------------------------------------------------------------------- diff --git a/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/translation_table.rs b/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/translation_table.rs index 337f9aed..73a93ff7 100644 --- a/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/translation_table.rs +++ b/14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/translation_table.rs @@ -87,8 +87,8 @@ register_bitfields! {u64, AttrIndx OFFSET(2) NUMBITS(3) [], TYPE OFFSET(1) NUMBITS(1) [ - Block = 0, - Table = 1 + Reserved_Invalid = 0, + Page = 1 ], VALID OFFSET(0) NUMBITS(1) [ @@ -116,19 +116,19 @@ struct PageDescriptor { value: u64, } -trait BaseAddr { - fn base_addr_u64(&self) -> u64; - fn base_addr_usize(&self) -> usize; +trait StartAddr { + fn phys_start_addr_u64(&self) -> u64; + fn phys_start_addr_usize(&self) -> usize; } -const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; +const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; //-------------------------------------------------------------------------------------------------- // Public Definitions //-------------------------------------------------------------------------------------------------- /// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB -/// aligned, hence the "reverse" order of appearance. +/// aligned, so the lvl3 is put first. #[repr(C)] #[repr(align(65536))] pub struct FixedSizeTranslationTable { @@ -146,12 +146,13 @@ pub type KernelTranslationTable = FixedSizeTranslationTable; // Private Code //-------------------------------------------------------------------------------------------------- -impl BaseAddr for [T; N] { - fn base_addr_u64(&self) -> u64 { +// The binary is still identity mapped, so we don't need to convert here. +impl StartAddr for [T; N] { + fn phys_start_addr_u64(&self) -> u64 { self as *const T as u64 } - fn base_addr_usize(&self) -> usize { + fn phys_start_addr_usize(&self) -> usize { self as *const _ as usize } } @@ -165,14 +166,14 @@ impl TableDescriptor { } /// Create an instance pointing to the supplied address. 
- pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self { + pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { let val = InMemoryRegister::::new(0); - let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT; + let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; val.write( - STAGE1_TABLE_DESCRIPTOR::VALID::True + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + STAGE1_TABLE_DESCRIPTOR::TYPE::Table - + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64), + + STAGE1_TABLE_DESCRIPTOR::VALID::True, ); TableDescriptor { value: val.get() } @@ -225,16 +226,16 @@ impl PageDescriptor { } /// Create an instance. - pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self { + pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { let val = InMemoryRegister::::new(0); - let shifted = output_addr as u64 >> Granule64KiB::SHIFT; + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; val.write( - STAGE1_PAGE_DESCRIPTOR::VALID::True + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + STAGE1_PAGE_DESCRIPTOR::AF::True - + attribute_fields.into() - + STAGE1_PAGE_DESCRIPTOR::TYPE::Table - + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), + + STAGE1_PAGE_DESCRIPTOR::TYPE::Page + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + attribute_fields.clone().into(), ); Self { value: val.get() } @@ -247,10 +248,9 @@ impl PageDescriptor { impl FixedSizeTranslationTable { /// Create an instance. - #[allow(clippy::assertions_on_constants)] pub const fn new() -> Self { + // Can't have a zero-sized address space. assert!(NUM_TABLES > 0); - assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0); Self { lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -266,15 +266,15 @@ impl FixedSizeTranslationTable { pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { *l2_entry = - TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize()); + TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); - let (output_addr, attribute_fields) = + let (phys_output_addr, attribute_fields) = bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; - *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields); + *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields); } } @@ -282,8 +282,8 @@ impl FixedSizeTranslationTable { } /// The translation table's base address to be used for programming the MMU. - pub fn base_address(&self) -> u64 { - self.lvl2.base_addr_u64() + pub fn phys_base_address(&self) -> u64 { + self.lvl2.phys_start_addr_u64() } } diff --git a/14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs b/14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs index 451d927b..0ccfae00 100644 --- a/14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs +++ b/14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs @@ -12,8 +12,8 @@ use core::ops::RangeInclusive; // Public Definitions //-------------------------------------------------------------------------------------------------- -/// The address space size chosen by this BSP. 
-pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; +/// The kernel's address space defined by this BSP. +pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; const NUM_MEM_RANGES: usize = 2; diff --git a/14_exceptions_part2_peripheral_IRQs/src/main.rs b/14_exceptions_part2_peripheral_IRQs/src/main.rs index 6a21c8d4..0bd9f71d 100644 --- a/14_exceptions_part2_peripheral_IRQs/src/main.rs +++ b/14_exceptions_part2_peripheral_IRQs/src/main.rs @@ -19,7 +19,7 @@ use libkernel::{bsp, cpu, driver, exception, info, memory, state, time, warn}; /// /// - Only a single core must be active and running this function. /// - The init calls in this function must appear in the correct order: -/// - Virtual memory must be activated before the device drivers. +/// - Caching must be activated before the device drivers. /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device /// drivers (which currently employ IRQSafeNullLocks instead of spinlocks), will fail to /// work on the RPi SoCs. @@ -30,7 +30,7 @@ unsafe fn kernel_init() -> ! { exception::handling_init(); - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { panic!("MMU: {}", string); } diff --git a/14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs b/14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs index cca2951a..ec4ca074 100644 --- a/14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs +++ b/14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs @@ -31,8 +31,17 @@ pub use arch_mmu::mmu; // Public Definitions //-------------------------------------------------------------------------------------------------- +/// MMU enable errors variants. +#[allow(missing_docs)] +#[derive(Debug)] +pub enum MMUEnableError { + AlreadyEnabled, + Other(&'static str), +} + /// Memory Management interfaces. pub mod interface { + use super::*; /// MMU functions. pub trait MMU { @@ -42,15 +51,18 @@ pub mod interface { /// # Safety /// /// - Changes the HW's global state. - unsafe fn init(&self) -> Result<(), &'static str>; + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>; + + /// Returns true if the MMU is enabled, false otherwise. + fn is_enabled(&self) -> bool; } } /// Describes the characteristics of a translation granule. pub struct TranslationGranule; -/// Describes the size of an address space. -pub struct AddressSpaceSize; +/// Describes properties of an address space. +pub struct AddressSpace; /// Architecture agnostic translation types. #[allow(missing_docs)] @@ -107,6 +119,15 @@ pub struct KernelVirtualLayout { // Public Code //-------------------------------------------------------------------------------------------------- +impl fmt::Display for MMUEnableError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"), + MMUEnableError::Other(x) => write!(f, "{}", x), + } + } +} + impl TranslationGranule { /// The granule's size. pub const SIZE: usize = Self::size_checked(); @@ -121,22 +142,18 @@ impl TranslationGranule { } } -impl AddressSpaceSize { +impl AddressSpace { /// The address space size. pub const SIZE: usize = Self::size_checked(); /// The address space shift, aka log2(size). 
- pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize; const fn size_checked() -> usize { assert!(AS_SIZE.is_power_of_two()); - assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two()); - assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two()); - // Must adhere to architectural restrictions. - assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE); - assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE); - assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0); + // Check for architectural restrictions as well. + Self::arch_address_space_size_sanity_checks(); AS_SIZE } diff --git a/14_exceptions_part2_peripheral_IRQs/src/synchronization.rs b/14_exceptions_part2_peripheral_IRQs/src/synchronization.rs index fe9d454a..94582732 100644 --- a/14_exceptions_part2_peripheral_IRQs/src/synchronization.rs +++ b/14_exceptions_part2_peripheral_IRQs/src/synchronization.rs @@ -139,3 +139,21 @@ impl interface::ReadWriteEx for InitStateLock { f(data) } } + +//-------------------------------------------------------------------------------------------------- +// Testing +//-------------------------------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use test_macros::kernel_test; + + /// InitStateLock must be transparent. + #[kernel_test] + fn init_state_lock_is_transparent() { + use core::mem::size_of; + + assert_eq!(size_of::>(), size_of::()); + } +} diff --git a/14_exceptions_part2_peripheral_IRQs/tests/02_exception_sync_page_fault.rs b/14_exceptions_part2_peripheral_IRQs/tests/02_exception_sync_page_fault.rs index 185089a3..f1535d34 100644 --- a/14_exceptions_part2_peripheral_IRQs/tests/02_exception_sync_page_fault.rs +++ b/14_exceptions_part2_peripheral_IRQs/tests/02_exception_sync_page_fault.rs @@ -29,7 +29,7 @@ unsafe fn kernel_init() -> ! { println!("Testing synchronous exception handling by causing a page fault"); println!("-------------------------------------------------------------------\n"); - if let Err(string) = memory::mmu::mmu().init() { + if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { println!("MMU: {}", string); cpu::qemu_exit_failure() } diff --git a/15_virtual_mem_part2_mmio_remap/README.md b/15_virtual_mem_part2_mmio_remap/README.md index 53eb8047..ec117146 100644 --- a/15_virtual_mem_part2_mmio_remap/README.md +++ b/15_virtual_mem_part2_mmio_remap/README.md @@ -16,7 +16,7 @@ - [Introduction](#introduction) - [Implementation](#implementation) - [A New Mapping API in `src/memory/mmu.rs`](#a-new-mapping-api-in-srcmemorymmutranslationtablers) - - [Using the new API in `bsp` code and drivers](#using-the-new-api-in-bsp-code-and-drivers) + - [The new APIs in action](#the-new-apis-in-action) - [Additional Changes](#additional-changes) - [Test it](#test-it) - [Diff to previous](#diff-to-previous) @@ -56,16 +56,16 @@ separation, this tutorial makes a start by changing the following things: 1. Instead of bulk-`identity mapping` the whole of the board's address space, only the particular parts that are needed will be mapped. -1. For now, the `kernel binary` stays identity mapped. This will be changed in the next tutorial as - it is a quite difficult and peculiar exercise to remap the kernel. +1. For now, the `kernel binary` stays identity mapped. This will be changed in the in the coming + tutorials as it is a quite difficult and peculiar exercise to remap the kernel. 1. 
Device `MMIO regions` are lazily remapped during the device driver's `init()`. 1. The remappings will populate the top of the virtual address space. In the `AArch64 MMU Driver`, we provide the top `256 MiB` for it. 1. It is possible to define the size of the virtual address space at compile time. We chose `8 GiB` for now, which means remapped MMIO virtual addresses will start at `7936 MiB` - (`0x1F0000000`). -1. We keep using `TTBR0` for the kernel page tables for now. This will be changed when we remap the - `kernel binary` in the next tutorial. + (`0x1_f000_0000`). +1. We keep using `TTBR0` for the kernel translation tables for now. This will be changed when we + remap the `kernel binary` in the coming tutorials. [ARM Cortex-A Series Programmer’s Guide for ARMv8-A]: https://developer.arm.com/documentation/den0024/latest/ [higher half kernel]: https://wiki.osdev.org/Higher_Half_Kernel @@ -80,10 +80,11 @@ kernel code** (`src/memory/**`). The way it worked was that the `architectural MMU code` would query the `bsp code` about the start and end of the physical address space, and any special regions in this space that need a mapping that _is not_ normal chacheable DRAM. It would then go ahead and map the whole address space at once -and never touch the page tables again during runtime. +and never touch the translation tables again during runtime. -Changing in this tutorial, **architecture** and **bsp** code will no longer talk to each other -directly. Instead, this is decoupled now through the kernel's **generic MMU subsystem code**. +Changing in this tutorial, **architecture** and **bsp** code will no longer autonomously create the +virtual memory mappings. Instead, this is now orchestrated by the kernel's **generic MMU subsystem +code**. ### A New Mapping API in `src/memory/mmu/translation_table.rs` @@ -93,7 +94,7 @@ First, we define an interface for operating on `translation tables`: /// Translation table operations. pub trait TranslationTable { /// Anything that needs to run before any of the other provided functions can be used. - unsafe fn init(&mut self); + fn init(&mut self); /// The translation table's base address to be used for programming the MMU. fn phys_base_address(&self) -> Address; @@ -107,12 +108,6 @@ pub trait TranslationTable { ) -> Result<(), &'static str>; /// Obtain a free virtual page slice in the MMIO region. - /// - /// The "MMIO region" is a distinct region of the implementor's choice, which allows - /// differentiating MMIO addresses from others. This can speed up debugging efforts. - /// Ideally, those MMIO addresses are also standing out visually so that a human eye can - /// identify them. For example, by allocating them from near the end of the virtual address - /// space. fn next_mmio_virt_page_slice( &mut self, num_pages: usize, @@ -123,11 +118,49 @@ pub trait TranslationTable { } ``` -The MMU driver (`src/_arch/_/memory/mmu.rs`) has one global instance for the kernel tables which -implements this interface, and which can be accessed by calling -`arch_mmu::kernel_translation_tables()` in the generic kernel code (`src/memory/mmu.rs`). From -there, we provide a couple of memory mapping functions that wrap around this interface , and which -are exported for the rest of the kernel to use: +In order to enable the generic kernel code to manipulate the kernel's translation tables, they must +first be made accessible. Until now, they were just a "hidden" struct in the `architectural` MMU +driver (`src/arch/.../memory/mmu.rs`). 
This made sense because the MMU driver code was the only code +that needed to be concerned with the table data structure, so having it accessible locally +simplified things. + +Since the tables need to be exposed to the rest of the kernel code now, it makes sense to move them +to `BSP` code. Because ultimately, it is the `BSP` that is defining the translation table's +properties, such as the size of the virtual address space that the tables need to cover. + +They are now defined in the global instances region of `src/bsp/.../memory/mmu.rs`. To control +access, they are guarded by an `InitStateLock`. + +```rust +//-------------------------------------------------------------------------------------------------- +// Global instances +//-------------------------------------------------------------------------------------------------- + +/// The kernel translation tables. +static KERNEL_TABLES: InitStateLock = + InitStateLock::new(KernelTranslationTable::new()); +``` + +The struct `KernelTranslationTable` is a type alias defined in the same file, which in turn gets its +definition from an associated type of type `KernelVirtAddrSpace`, which itself is a type alias of +`memory::mmu::AddressSpace`. I know this sounds horribly complicated, but in the end this is just +some layers of `const generics` whose implementation is scattered between `generic` and `arch` code. +This is done to (1) ensure a sane compile-time definition of the translation table struct (by doing +various bounds checks), and (2) to separate concerns between generic `MMU` code and specializations +that come from the `architectural` part. + +In the end, these tables can be accessed by calling `bsp::memory::mmu::kernel_translation_tables()`: + +```rust +/// Return a reference to the kernel's translation tables. +pub fn kernel_translation_tables() -> &'static InitStateLock { + &KERNEL_TABLES +} +``` + +Finally, the generic kernel code (`src/memory/mmu.rs`) now provides a couple of memory mapping +functions that access and manipulate this instance. They are exported for the rest of the kernel to +use: ```rust /// Raw mapping of virtual to physical pages in the kernel translation tables. @@ -135,8 +168,8 @@ are exported for the rest of the kernel to use: /// Prevents mapping into the MMIO range of the tables. pub unsafe fn kernel_map_pages_at( name: &'static str, - phys_pages: &PageSliceDescriptor, virt_pages: &PageSliceDescriptor, + phys_pages: &PageSliceDescriptor, attr: &AttributeFields, ) -> Result<(), &'static str>; @@ -148,20 +181,39 @@ pub unsafe fn kernel_map_mmio( phys_mmio_descriptor: &MMIODescriptor, ) -> Result, &'static str>; -/// Map the kernel's binary and enable the MMU. -pub unsafe fn kernel_map_binary_and_enable_mmu() -> Result<(), &'static str> ; +/// Map the kernel's binary. Returns the translation table's base address. +pub unsafe fn kernel_map_binary() -> Result, &'static str>; + +/// Enable the MMU and data + instruction caching. 
+pub unsafe fn enable_mmu_and_caching( + phys_tables_base_addr: Address, +) -> Result<(), MMUEnableError>; ``` -### Using the new API in `bsp` code and drivers +### The new APIs in action + +`kernel_map_binary()` and `enable_mmu_and_caching()` are used early in `kernel_init()` to set up +virtual memory: + +```rust +let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() { + Err(string) => panic!("Error mapping kernel binary: {}", string), + Ok(addr) => addr, +}; + +if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) { + panic!("Enabling MMU failed: {}", e); +} +``` -For now, there are two places where the new API is used. First, in `src/bsp/_/memory/mmu.rs`, which -provides a dedicated call to **map the kernel binary** (because it is the `BSP` that provides the -`linker script`, which in turn defines the final layout of the kernel in memory): +Both functions internally use `bsp` and `arch` specific code to achieve their goals. For example, +`memory::mmu::kernel_map_binary()` itself wraps around a `bsp` function of the same name +(`bsp::memory::mmu::kernel_map_binary()`): ```rust /// Map the kernel binary. pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { - kernel_mmu::kernel_map_pages_at( + generic_mmu::kernel_map_pages_at( "Kernel boot-core stack", &virt_stack_page_desc(), &phys_stack_page_desc(), @@ -172,12 +224,12 @@ pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { }, )?; - kernel_mmu::kernel_map_pages_at( + generic_mmu::kernel_map_pages_at( "Kernel code and RO data", // omitted for brevity. )?; - kernel_mmu::kernel_map_pages_at( + generic_mmu::kernel_map_pages_at( "Kernel data and bss", // omitted for brevity. )?; @@ -186,8 +238,8 @@ pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { } ``` -Second, in device drivers, which now expect an `MMIODescriptor` type instead of a raw address. The -following is an example for the `UART`: +Another user of the new APIs are device drivers, which now expect an `MMIODescriptor` type instead +of a raw address. The following is an example for the `UART`: ```rust impl PL011Uart { @@ -234,7 +286,7 @@ through them: ## Test it When you load the kernel, you can now see that the driver's MMIO virtual addresses start at -`0x1F0000000`: +`0x1_f000_0000`: Raspberry Pi 3: @@ -254,21 +306,21 @@ Minipush 1.0 Raspberry Pi 3 [ML] Requesting binary -[MP] ⏩ Pushing 67 KiB ========================================🦀 100% 33 KiB/s Time: 00:00:02 +[MP] ⏩ Pushing 67 KiB =========================================🦀 100% 0 KiB/s Time: 00:00:00 [ML] Loaded! 
Executing the payload now -[ 3.064756] Booting on: Raspberry Pi 3 -[ 3.065839] MMU online: -[ 3.067010] ----------------------------------------------------------------------------------------------------------------- -[ 3.072868] Virtual Physical Size Attr Entity -[ 3.078725] ----------------------------------------------------------------------------------------------------------------- -[ 3.084585] 0x000070000..0x00007FFFF --> 0x000070000..0x00007FFFF | 64 KiB | C RW XN | Kernel boot-core stack -[ 3.089877] 0x000080000..0x00008FFFF --> 0x000080000..0x00008FFFF | 64 KiB | C RO X | Kernel code and RO data -[ 3.095213] 0x000090000..0x0001BFFFF --> 0x000090000..0x0001BFFFF | 1 MiB | C RW XN | Kernel data and bss -[ 3.100376] 0x1F0000000..0x1F000FFFF --> 0x03F200000..0x03F20FFFF | 64 KiB | Dev RW XN | BCM GPIO -[ 3.105060] | BCM PL011 UART -[ 3.110008] 0x1F0010000..0x1F001FFFF --> 0x03F000000..0x03F00FFFF | 64 KiB | Dev RW XN | BCM Peripheral Interrupt Controller -[ 3.115863] ----------------------------------------------------------------------------------------------------------------- +[ 0.786819] Booting on: Raspberry Pi 3 +[ 0.787092] MMU online: +[ 0.787384] ------------------------------------------------------------------------------------------------------------------------------------------- +[ 0.789128] Virtual Physical Size Attr Entity +[ 0.790873] ------------------------------------------------------------------------------------------------------------------------------------------- +[ 0.792618] 0x0000_0000_0007_0000..0x0000_0000_0007_ffff --> 0x00_0007_0000..0x00_0007_ffff | 64 KiB | C RW XN | Kernel boot-core stack +[ 0.794221] 0x0000_0000_0008_0000..0x0000_0000_0008_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data +[ 0.795835] 0x0000_0000_0009_0000..0x0000_0000_001b_ffff --> 0x00_0009_0000..0x00_001b_ffff | 1 MiB | C RW XN | Kernel data and bss +[ 0.797406] 0x0000_0001_f000_0000..0x0000_0001_f000_ffff --> 0x00_3f20_0000..0x00_3f20_ffff | 64 KiB | Dev RW XN | BCM GPIO +[ 0.798857] | BCM PL011 UART +[ 0.800374] 0x0000_0001_f001_0000..0x0000_0001_f001_ffff --> 0x00_3f00_0000..0x00_3f00_ffff | 64 KiB | Dev RW XN | BCM Peripheral Interrupt Controller +[ 0.802117] ------------------------------------------------------------------------------------------------------------------------------------------- ``` Raspberry Pi 4: @@ -289,22 +341,22 @@ Minipush 1.0 Raspberry Pi 4 [ML] Requesting binary -[MP] ⏩ Pushing 74 KiB ========================================🦀 100% 24 KiB/s Time: 00:00:03 +[MP] ⏩ Pushing 74 KiB =========================================🦀 100% 0 KiB/s Time: 00:00:00 [ML] Loaded! 
Executing the payload now -[ 3.379342] Booting on: Raspberry Pi 4 -[ 3.379731] MMU online: -[ 3.380902] ----------------------------------------------------------------------------------------------------------------- -[ 3.386759] Virtual Physical Size Attr Entity -[ 3.392616] ----------------------------------------------------------------------------------------------------------------- -[ 3.398475] 0x000070000..0x00007FFFF --> 0x000070000..0x00007FFFF | 64 KiB | C RW XN | Kernel boot-core stack -[ 3.403768] 0x000080000..0x00008FFFF --> 0x000080000..0x00008FFFF | 64 KiB | C RO X | Kernel code and RO data -[ 3.409104] 0x000090000..0x0001BFFFF --> 0x000090000..0x0001BFFFF | 1 MiB | C RW XN | Kernel data and bss -[ 3.414267] 0x1F0000000..0x1F000FFFF --> 0x0FE200000..0x0FE20FFFF | 64 KiB | Dev RW XN | BCM GPIO -[ 3.418951] | BCM PL011 UART -[ 3.423898] 0x1F0010000..0x1F001FFFF --> 0x0FF840000..0x0FF84FFFF | 64 KiB | Dev RW XN | GICD -[ 3.428409] | GICC -[ 3.432921] ----------------------------------------------------------------------------------------------------------------- +[ 0.853908] Booting on: Raspberry Pi 4 +[ 0.854007] MMU online: +[ 0.854299] ------------------------------------------------------------------------------------------------------------------------------------------- +[ 0.856043] Virtual Physical Size Attr Entity +[ 0.857788] ------------------------------------------------------------------------------------------------------------------------------------------- +[ 0.859533] 0x0000_0000_0007_0000..0x0000_0000_0007_ffff --> 0x00_0007_0000..0x00_0007_ffff | 64 KiB | C RW XN | Kernel boot-core stack +[ 0.861137] 0x0000_0000_0008_0000..0x0000_0000_0008_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data +[ 0.862750] 0x0000_0000_0009_0000..0x0000_0000_001b_ffff --> 0x00_0009_0000..0x00_001b_ffff | 1 MiB | C RW XN | Kernel data and bss +[ 0.864321] 0x0000_0001_f000_0000..0x0000_0001_f000_ffff --> 0x00_fe20_0000..0x00_fe20_ffff | 64 KiB | Dev RW XN | BCM GPIO +[ 0.865772] | BCM PL011 UART +[ 0.867289] 0x0000_0001_f001_0000..0x0000_0001_f001_ffff --> 0x00_ff84_0000..0x00_ff84_ffff | 64 KiB | Dev RW XN | GICD +[ 0.868697] | GICC +[ 0.870105] ------------------------------------------------------------------------------------------------------------------------------------------- ``` ## Diff to previous @@ -338,74 +390,82 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans + arch_mmu::{Granule512MiB, Granule64KiB}, + AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor, + }, -+ Address, AddressType, Physical, Virtual, ++ Address, Physical, Virtual, }, }; use core::convert; -@@ -117,11 +120,11 @@ +@@ -117,12 +120,9 @@ } - trait BaseAddr { -- fn base_addr_u64(&self) -> u64; -- fn base_addr_usize(&self) -> usize; -+ fn phys_base_addr(&self) -> Address; + trait StartAddr { +- fn phys_start_addr_u64(&self) -> u64; +- fn phys_start_addr_usize(&self) -> usize; ++ fn phys_start_addr(&self) -> Address; } --const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; -+const NUM_LVL2_TABLES: usize = -+ bsp::memory::mmu::KernelVirtAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; - +-const NUM_LVL2_TABLES: usize = bsp::memory::mmu::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; +- //-------------------------------------------------------------------------------------------------- // Public Definitions -@@ -137,6 +140,12 @@ + 
//-------------------------------------------------------------------------------------------------- +@@ -137,10 +137,13 @@ /// Table descriptors, covering 512 MiB windows. lvl2: [TableDescriptor; NUM_TABLES], -+ +-} + +-/// A translation table type for the kernel space. +-pub type KernelTranslationTable = FixedSizeTranslationTable; + /// Index of the next free MMIO page. + cur_l3_mmio_index: usize, + + /// Have the tables been initialized? + initialized: bool, - } ++} - /// A translation table type for the kernel space. -@@ -147,12 +156,9 @@ //-------------------------------------------------------------------------------------------------- + // Private Code +@@ -148,12 +151,8 @@ - impl BaseAddr for [T; N] { -- fn base_addr_u64(&self) -> u64 { + // The binary is still identity mapped, so we don't need to convert here. + impl StartAddr for [T; N] { +- fn phys_start_addr_u64(&self) -> u64 { - self as *const T as u64 - } - -- fn base_addr_usize(&self) -> usize { +- fn phys_start_addr_usize(&self) -> usize { - self as *const _ as usize -+ fn phys_base_addr(&self) -> Address { -+ // The binary is still identity mapped, so we don't need to convert here. ++ fn phys_start_addr(&self) -> Address { + Address::new(self as *const _ as usize) } } -@@ -225,20 +231,29 @@ +@@ -166,10 +165,10 @@ + } + + /// Create an instance pointing to the supplied address. +- pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { ++ pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address) -> Self { + let val = InMemoryRegister::::new(0); + +- let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; ++ let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT; + val.write( + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + + STAGE1_TABLE_DESCRIPTOR::TYPE::Table +@@ -226,7 +225,10 @@ } /// Create an instance. -- pub fn from_output_addr(output_addr: usize, attribute_fields: AttributeFields) -> Self { +- pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { + pub fn from_output_addr( -+ output_addr: *const Page, ++ phys_output_addr: *const Page, + attribute_fields: &AttributeFields, + ) -> Self { let val = InMemoryRegister::::new(0); - let shifted = output_addr as u64 >> Granule64KiB::SHIFT; - val.write( - STAGE1_PAGE_DESCRIPTOR::VALID::True - + STAGE1_PAGE_DESCRIPTOR::AF::True -- + attribute_fields.into() -+ + attribute_fields.clone().into() - + STAGE1_PAGE_DESCRIPTOR::TYPE::Table - + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), - ); + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; +@@ -240,50 +242,193 @@ Self { value: val.get() } } @@ -418,21 +478,29 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans } //-------------------------------------------------------------------------------------------------- -@@ -246,44 +261,172 @@ + // Public Code //-------------------------------------------------------------------------------------------------- ++impl memory::mmu::AssociatedTranslationTable ++ for memory::mmu::AddressSpace ++where ++ [u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized, ++{ ++ type TableStartFromBottom = FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }>; ++} ++ impl FixedSizeTranslationTable { + // Reserve the last 256 MiB of the address space for MMIO mappings. + const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1; + const L3_MMIO_START_INDEX: usize = 8192 / 2; + /// Create an instance. 
- #[allow(clippy::assertions_on_constants)] ++ #[allow(clippy::assertions_on_constants)] pub const fn new() -> Self { + assert!(bsp::memory::mmu::KernelGranule::SIZE == Granule64KiB::SIZE); ++ + // Can't have a zero-sized address space. assert!(NUM_TABLES > 0); -- assert!((bsp::memory::mmu::KernelAddrSpaceSize::SIZE modulo Granule512MiB::SIZE) == 0); -+ assert!((bsp::memory::mmu::KernelVirtAddrSpaceSize::SIZE modulo Granule512MiB::SIZE) == 0); Self { lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -450,10 +518,10 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans - pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { - for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { - *l2_entry = -- TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].base_addr_usize()); +- TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); + /// The start address of the table's MMIO range. + #[inline(always)] -+ const fn mmio_start_addr(&self) -> Address { ++ fn mmio_start_addr(&self) -> Address { + Address::new( + (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) + | (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT), @@ -462,7 +530,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans + + /// The inclusive end address of the table's MMIO range. + #[inline(always)] -+ const fn mmio_end_addr_inclusive(&self) -> Address { ++ fn mmio_end_addr_inclusive(&self) -> Address { + Address::new( + (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) + | (8191 << Granule64KiB::SHIFT) @@ -472,12 +540,13 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans + + /// Helper to calculate the lvl2 and lvl3 indices from an address. + #[inline(always)] -+ fn lvl2_lvl3_index_from( ++ fn lvl2_lvl3_index_from( + &self, -+ addr: *const Page, ++ addr: *const Page, + ) -> Result<(usize, usize), &'static str> { -+ let lvl2_index = addr as usize >> Granule512MiB::SHIFT; -+ let lvl3_index = (addr as usize & Granule512MiB::MASK) >> Granule64KiB::SHIFT; ++ let addr = addr as usize; ++ let lvl2_index = addr >> Granule512MiB::SHIFT; ++ let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT; + + if lvl2_index > (NUM_TABLES - 1) { + return Err("Virtual page is out of bounds of translation table"); @@ -501,36 +570,32 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans +//------------------------------------------------------------------------------ +// OS Interface Code +//------------------------------------------------------------------------------ -+ + +- for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { +- let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); +impl memory::mmu::translation_table::interface::TranslationTable + for FixedSizeTranslationTable +{ -+ unsafe fn init(&mut self) { ++ fn init(&mut self) { + if self.initialized { + return; + } - -- for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { -- let virt_addr = (l2_nr << Granule512MiB::SHIFT) + (l3_nr << Granule64KiB::SHIFT); ++ + // Populate the l2 entries. 
+ for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() { -+ let desc = TableDescriptor::from_next_lvl_table_addr( -+ self.lvl3[lvl2_nr].phys_base_addr().into_usize(), -+ ); ++ let desc = ++ TableDescriptor::from_next_lvl_table_addr(self.lvl3[lvl2_nr].phys_start_addr()); + *lvl2_entry = desc; + } + + self.cur_l3_mmio_index = Self::L3_MMIO_START_INDEX; + self.initialized = true; + } - -- let (output_addr, attribute_fields) = -- bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; ++ + fn phys_base_address(&self) -> Address { -+ self.lvl2.phys_base_addr() ++ self.lvl2.phys_start_addr() + } - -- *l3_entry = PageDescriptor::from_output_addr(output_addr, attribute_fields); ++ + unsafe fn map_pages_at( + &mut self, + virt_pages: &PageSliceDescriptor, @@ -542,15 +607,18 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans + let p = phys_pages.as_slice(); + let v = virt_pages.as_slice(); + -+ if p.len() != v.len() { -+ return Err("Tried to map page slices with unequal sizes"); -+ } -+ + // No work to do for empty slices. -+ if p.is_empty() { ++ if v.is_empty() { + return Ok(()); + } -+ + +- let (phys_output_addr, attribute_fields) = +- bsp::memory::mmu::virt_mem_layout().virt_addr_properties(virt_addr)?; ++ if v.len() != p.len() { ++ return Err("Tried to map page slices with unequal sizes"); ++ } + +- *l3_entry = PageDescriptor::from_output_addr(phys_output_addr, &attribute_fields); + if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page() { + return Err("Tried to map outside of physical address space"); + } @@ -569,8 +637,8 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans } - /// The translation table's base address to be used for programming the MMU. 
-- pub fn base_address(&self) -> u64 { -- self.lvl2.base_addr_u64() +- pub fn phys_base_address(&self) -> u64 { +- self.lvl2.phys_start_addr_u64() + fn next_mmio_virt_page_slice( + &mut self, + num_pages: usize, @@ -585,14 +653,13 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans + return Err("Not enough MMIO space left"); + } + -+ let addr = (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) -+ | (self.cur_l3_mmio_index << Granule64KiB::SHIFT); ++ let addr = Address::new( ++ (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) ++ | (self.cur_l3_mmio_index << Granule64KiB::SHIFT), ++ ); + self.cur_l3_mmio_index += num_pages; + -+ Ok(PageSliceDescriptor::from_addr( -+ Address::new(addr), -+ num_pages, -+ )) ++ Ok(PageSliceDescriptor::from_addr(addr, num_pages)) + } + + fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor) -> bool { @@ -609,11 +676,11 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans } } -@@ -292,6 +435,9 @@ +@@ -292,6 +437,9 @@ //-------------------------------------------------------------------------------------------------- #[cfg(test)] -+pub type MinSizeKernelTranslationTable = FixedSizeTranslationTable<1>; ++pub type MinSizeTranslationTable = FixedSizeTranslationTable<1>; + +#[cfg(test)] mod tests { @@ -623,93 +690,88 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu/trans diff -uNr 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs 15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu.rs --- 14_exceptions_part2_peripheral_IRQs/src/_arch/aarch64/memory/mmu.rs +++ 15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu.rs -@@ -15,7 +15,11 @@ +@@ -15,7 +15,7 @@ use crate::{ bsp, memory, - memory::mmu::{translation_table::KernelTranslationTable, TranslationGranule}, -+ memory::{ -+ mmu::{translation_table::KernelTranslationTable, TranslationGranule}, -+ Address, Physical, -+ }, -+ synchronization::InitStateLock, ++ memory::{mmu::TranslationGranule, Address, Physical}, }; + use core::intrinsics::unlikely; use cortex_a::{barrier, regs::*}; +@@ -45,13 +45,6 @@ + // Global instances + //-------------------------------------------------------------------------------------------------- -@@ -37,7 +41,7 @@ - pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB - - /// The max supported address space size. --pub const MAX_ADDR_SPACE_SIZE: usize = 32 * 1024 * 1024 * 1024; // 32 GiB -+pub const MAX_ADDR_SPACE_SIZE: usize = 8 * 1024 * 1024 * 1024; // 8 GiB - - /// The supported address space size granule. - pub type AddrSpaceSizeGranule = Granule512MiB; -@@ -58,7 +62,8 @@ - /// # Safety - /// - /// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0". +-/// The kernel translation tables. +-/// +-/// # Safety +-/// +-/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0". -static mut KERNEL_TABLES: KernelTranslationTable = KernelTranslationTable::new(); -+static KERNEL_TABLES: InitStateLock = -+ InitStateLock::new(KernelTranslationTable::new()); - +- static MMU: MemoryManagementUnit = MemoryManagementUnit; -@@ -83,7 +88,7 @@ + //-------------------------------------------------------------------------------------------------- +@@ -86,7 +79,7 @@ + /// Configure various settings of stage 1 of the EL1 translation regime. 
fn configure_translation_control(&self) { - let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); -- let t0sz = (64 - bsp::memory::mmu::KernelAddrSpaceSize::SHIFT) as u64; -+ let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpaceSize::SHIFT) as u64; +- let t0sz = (64 - bsp::memory::mmu::KernelAddrSpace::SIZE_SHIFT) as u64; ++ let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64; TCR_EL1.write( - TCR_EL1::TBI0::Ignored -@@ -103,6 +108,11 @@ - // Public Code - //-------------------------------------------------------------------------------------------------- - -+/// Return a guarded reference to the kernel's translation tables. -+pub fn kernel_translation_tables() -> &'static InitStateLock { -+ &KERNEL_TABLES -+} -+ - /// Return a reference to the MMU instance. - pub fn mmu() -> &'static impl memory::mmu::interface::MMU { - &MMU -@@ -113,7 +123,10 @@ - //------------------------------------------------------------------------------ + TCR_EL1::TBI0::Used +@@ -118,7 +111,10 @@ + use memory::mmu::MMUEnableError; impl memory::mmu::interface::MMU for MemoryManagementUnit { -- unsafe fn init(&self) -> Result<(), &'static str> { -+ unsafe fn enable( +- unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError> { ++ unsafe fn enable_mmu_and_caching( + &self, -+ kernel_table_phys_base_addr: Address, -+ ) -> Result<(), &'static str> { - // Fail early if translation granule is not supported. Both RPis support it, though. - if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { - return Err("Translation granule not supported in HW"); -@@ -122,11 +135,8 @@ ++ phys_tables_base_addr: Address, ++ ) -> Result<(), MMUEnableError> { + if unlikely(self.is_enabled()) { + return Err(MMUEnableError::AlreadyEnabled); + } +@@ -133,13 +129,8 @@ // Prepare the memory attribute indirection register. self.set_up_mair(); - // Populate translation tables. -- KERNEL_TABLES.populate_tt_entries()?; +- KERNEL_TABLES +- .populate_tt_entries() +- .map_err(|e| MMUEnableError::Other(e))?; - // Set the "Translation Table Base Register". -- TTBR0_EL1.set_baddr(KERNEL_TABLES.base_address()); -+ TTBR0_EL1.set_baddr(kernel_table_phys_base_addr.into_usize() as u64); +- TTBR0_EL1.set_baddr(KERNEL_TABLES.phys_base_address()); ++ TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64); self.configure_translation_control(); -@@ -158,7 +168,7 @@ - #[kernel_test] - fn kernel_tables_in_bss() { - let bss_range = bsp::memory::bss_range_inclusive(); -- let kernel_tables_addr = unsafe { &KERNEL_TABLES as *const _ as usize as *mut u64 }; -+ let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64; - - assert!(bss_range.contains(&kernel_tables_addr)); +@@ -162,22 +153,3 @@ + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) } + } +- +-//-------------------------------------------------------------------------------------------------- +-// Testing +-//-------------------------------------------------------------------------------------------------- +- +-#[cfg(test)] +-mod tests { +- use super::*; +- use test_macros::kernel_test; +- +- /// Check if KERNEL_TABLES is in .bss. 
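
For the `t0sz` line above, `TCR_EL1.T0SZ` encodes the virtual address width as `64 - log2(address space size)`. A tiny host-side check of that arithmetic, using the 8 GiB `KernelVirtAddrSpace` size this BSP chooses (the helper function itself is only illustrative):

```rust
// Illustrative only: reproduce the T0SZ arithmetic on the host.
fn t0sz_for_addr_space(size: u64) -> u64 {
    assert!(size.is_power_of_two());
    64 - size.trailing_zeros() as u64
}

fn main() {
    let eight_gib: u64 = 8 * 1024 * 1024 * 1024;

    // log2(8 GiB) = 33, so T0SZ = 64 - 33 = 31.
    assert_eq!(t0sz_for_addr_space(eight_gib), 31);
    println!("T0SZ for an 8 GiB space: {}", t0sz_for_addr_space(eight_gib));
}
```
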
+- #[kernel_test] +- fn kernel_tables_in_bss() { +- let bss_range = bsp::memory::bss_range_inclusive(); +- let kernel_tables_addr = unsafe { &KERNEL_TABLES as *const _ as usize as *mut u64 }; +- +- assert!(bss_range.contains(&kernel_tables_addr)); +- } +-} diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gicc.rs 15_virtual_mem_part2_mmio_remap/src/bsp/device_driver/arm/gicv2/gicc.rs --- 14_exceptions_part2_peripheral_IRQs/src/bsp/device_driver/arm/gicv2/gicc.rs @@ -1305,7 +1367,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/console.rs 15_ use super::memory; -use crate::{bsp::device_driver, console}; -+use crate::{bsp::device_driver, console, cpu}; ++use crate::{bsp::device_driver, console, cpu, driver}; use core::fmt; //-------------------------------------------------------------------------------------------------- @@ -1315,7 +1377,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/console.rs 15_ pub unsafe fn panic_console_out() -> impl fmt::Write { - let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START); - let mut panic_uart = device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START); -+ use crate::driver::interface::DeviceDriver; ++ use driver::interface::DeviceDriver; + let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize()); + let mut panic_uart = @@ -1379,7 +1441,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/link.ld 15_vir diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs 15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs --- 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs +++ 15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs -@@ -4,70 +4,131 @@ +@@ -4,70 +4,157 @@ //! BSP Memory Management Unit. @@ -1389,29 +1451,44 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs +use crate::{ + common, + memory::{ -+ mmu as kernel_mmu, ++ mmu as generic_mmu, + mmu::{ -+ AccessPermissions, AddressSpaceSize, AttributeFields, MemAttributes, Page, -+ PageSliceDescriptor, TranslationGranule, ++ AccessPermissions, AddressSpace, AssociatedTranslationTable, AttributeFields, ++ MemAttributes, Page, PageSliceDescriptor, TranslationGranule, + }, + Physical, Virtual, + }, ++ synchronization::InitStateLock, +}; ++ ++//-------------------------------------------------------------------------------------------------- ++// Private Definitions ++//-------------------------------------------------------------------------------------------------- ++ ++type KernelTranslationTable = ++ ::TableStartFromBottom; //-------------------------------------------------------------------------------------------------- // Public Definitions //-------------------------------------------------------------------------------------------------- --/// The address space size chosen by this BSP. --pub type KernelAddrSpaceSize = AddressSpaceSize<{ memory_map::END_INCLUSIVE + 1 }>; -- --const NUM_MEM_RANGES: usize = 2; +-/// The kernel's address space defined by this BSP. +-pub type KernelAddrSpace = AddressSpace<{ memory_map::END_INCLUSIVE + 1 }>; +/// The translation granule chosen by this BSP. This will be used everywhere else in the kernel to +/// derive respective data structures and their sizes. For example, the `crate::memory::mmu::Page`. 
+pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>; +-const NUM_MEM_RANGES: usize = 2; ++/// The kernel's virtual address space defined by this BSP. ++pub type KernelVirtAddrSpace = AddressSpace<{ 8 * 1024 * 1024 * 1024 }>; + -/// The virtual memory layout. --/// ++//-------------------------------------------------------------------------------------------------- ++// Global instances ++//-------------------------------------------------------------------------------------------------- ++ ++/// The kernel translation tables. + /// -/// The layout must contain only special ranges, aka anything that is _not_ normal cacheable DRAM. -/// It is agnostic of the paging granularity that the architecture's MMU will use. -pub static LAYOUT: KernelVirtualLayout = KernelVirtualLayout::new( @@ -1439,8 +1516,12 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs - }, - ], -); -+/// The address space size chosen by this BSP. -+pub type KernelVirtAddrSpaceSize = AddressSpaceSize<{ 8 * 1024 * 1024 * 1024 }>; ++/// It is mandatory that InitStateLock is transparent. ++/// ++/// That is, `size_of(InitStateLock) == size_of(KernelTranslationTable)`. ++/// There is a unit tests that checks this porperty. ++static KERNEL_TABLES: InitStateLock = ++ InitStateLock::new(KernelTranslationTable::new()); //-------------------------------------------------------------------------------------------------- // Private Code @@ -1477,10 +1558,8 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs + let num_pages = size_to_num_pages(super::data_size()); + + PageSliceDescriptor::from_addr(super::virt_data_start(), num_pages) - } - --fn mmio_range_inclusive() -> RangeInclusive { -- RangeInclusive::new(memory_map::mmio::START, memory_map::mmio::END_INCLUSIVE) ++} ++ +// The binary is still identity mapped, so we don't need to convert in the following. + +/// The boot core's stack. @@ -1491,8 +1570,10 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs +/// The Read-Only (RO) pages of the kernel binary. +fn phys_ro_page_desc() -> PageSliceDescriptor { + virt_ro_page_desc().into() -+} -+ + } + +-fn mmio_range_inclusive() -> RangeInclusive { +- RangeInclusive::new(memory_map::mmio::START, memory_map::mmio::END_INCLUSIVE) +/// The data pages of the kernel binary. +fn phys_data_page_desc() -> PageSliceDescriptor { + virt_data_page_desc().into() @@ -1505,6 +1586,11 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs -/// Return a reference to the virtual memory layout. -pub fn virt_mem_layout() -> &'static KernelVirtualLayout { - &LAYOUT ++/// Return a reference to the kernel's translation tables. ++pub fn kernel_translation_tables() -> &'static InitStateLock { ++ &KERNEL_TABLES ++} ++ +/// Pointer to the last page of the physical address space. +pub fn phys_addr_space_end_page() -> *const Page { + common::align_down( @@ -1519,7 +1605,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs +/// +/// - Any miscalculation or attribute error will likely be fatal. Needs careful manual checking. 
+pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { -+ kernel_mmu::kernel_map_pages_at( ++ generic_mmu::kernel_map_pages_at( + "Kernel boot-core stack", + &virt_stack_page_desc(), + &phys_stack_page_desc(), @@ -1530,7 +1616,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs + }, + )?; + -+ kernel_mmu::kernel_map_pages_at( ++ generic_mmu::kernel_map_pages_at( + "Kernel code and RO data", + &virt_ro_page_desc(), + &phys_ro_page_desc(), @@ -1541,7 +1627,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs + }, + )?; + -+ kernel_mmu::kernel_map_pages_at( ++ generic_mmu::kernel_map_pages_at( + "Kernel data and bss", + &virt_data_page_desc(), + &phys_data_page_desc(), @@ -1556,19 +1642,19 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs } //-------------------------------------------------------------------------------------------------- -@@ -82,14 +143,12 @@ +@@ -82,14 +169,12 @@ /// Check alignment of the kernel's virtual memory layout sections. #[kernel_test] fn virt_mem_layout_sections_are_64KiB_aligned() { - const SIXTYFOUR_KIB: usize = 65536; +- +- for i in LAYOUT.inner().iter() { +- let start: usize = *(i.virtual_range)().start(); +- let end: usize = *(i.virtual_range)().end() + 1; + for i in [virt_stack_page_desc, virt_ro_page_desc, virt_data_page_desc].iter() { + let start: usize = i().start_addr().into_usize(); + let end: usize = i().end_addr().into_usize(); -- for i in LAYOUT.inner().iter() { -- let start: usize = *(i.virtual_range)().start(); -- let end: usize = *(i.virtual_range)().end() + 1; -- - assert_eq!(start modulo SIXTYFOUR_KIB, 0); - assert_eq!(end modulo SIXTYFOUR_KIB, 0); + assert_eq!(start modulo KernelGranule::SIZE, 0); @@ -1576,7 +1662,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs assert!(end >= start); } } -@@ -97,17 +156,18 @@ +@@ -97,18 +182,28 @@ /// Ensure the kernel's virtual memory layout is free of overlaps. #[kernel_test] fn virt_mem_layout_has_no_overlaps() { @@ -1592,20 +1678,30 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory/mmu.rs - assert!(!second_range().contains(first_range().start())); - assert!(!second_range().contains(first_range().end())); + let layout = [ -+ virt_stack_page_desc().into_usize_range_inclusive(), -+ virt_ro_page_desc().into_usize_range_inclusive(), -+ virt_data_page_desc().into_usize_range_inclusive(), ++ virt_stack_page_desc(), ++ virt_ro_page_desc(), ++ virt_data_page_desc(), + ]; + + for (i, first_range) in layout.iter().enumerate() { + for second_range in layout.iter().skip(i + 1) { -+ assert!(!first_range.contains(second_range.start())); -+ assert!(!first_range.contains(second_range.end())); -+ assert!(!second_range.contains(first_range.start())); -+ assert!(!second_range.contains(first_range.end())); ++ assert!(!first_range.contains(second_range.start_addr())); ++ assert!(!first_range.contains(second_range.end_addr_inclusive())); ++ assert!(!second_range.contains(first_range.start_addr())); ++ assert!(!second_range.contains(first_range.end_addr_inclusive())); } } } ++ ++ /// Check if KERNEL_TABLES is in .bss. 
++ #[kernel_test] ++ fn kernel_tables_in_bss() { ++ let bss_range = super::super::bss_range_inclusive(); ++ let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64; ++ ++ assert!(bss_range.contains(&kernel_tables_addr)); ++ } + } diff -uNr 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs 15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory.rs --- 14_exceptions_part2_peripheral_IRQs/src/bsp/raspberrypi/memory.rs @@ -1939,15 +2035,16 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/driver.rs 15_virtual_mem_part2 diff -uNr 14_exceptions_part2_peripheral_IRQs/src/lib.rs 15_virtual_mem_part2_mmio_remap/src/lib.rs --- 14_exceptions_part2_peripheral_IRQs/src/lib.rs +++ 15_virtual_mem_part2_mmio_remap/src/lib.rs -@@ -112,6 +112,7 @@ +@@ -112,6 +112,8 @@ #![allow(clippy::clippy::upper_case_acronyms)] #![allow(incomplete_features)] #![feature(asm)] ++#![feature(const_evaluatable_checked)] +#![feature(const_fn)] #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] #![feature(const_panic)] -@@ -133,6 +134,7 @@ +@@ -133,6 +135,7 @@ mod synchronization; pub mod bsp; @@ -1959,7 +2056,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/lib.rs 15_virtual_mem_part2_mm diff -uNr 14_exceptions_part2_peripheral_IRQs/src/main.rs 15_virtual_mem_part2_mmio_remap/src/main.rs --- 14_exceptions_part2_peripheral_IRQs/src/main.rs +++ 15_virtual_mem_part2_mmio_remap/src/main.rs -@@ -26,21 +26,34 @@ +@@ -26,21 +26,39 @@ #[no_mangle] unsafe fn kernel_init() -> ! { use driver::interface::DriverManager; @@ -1967,14 +2064,18 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/main.rs 15_virtual_mem_part2_m exception::handling_init(); -- if let Err(string) = memory::mmu::mmu().init() { +- if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { - panic!("MMU: {}", string); -+ if let Err(string) = memory::mmu::kernel_map_binary_and_enable_mmu() { -+ panic!("Enabling MMU failed: {}", string); - } -+ // Printing will silently fail fail from here on, because the driver's MMIO is not remapped yet. - -- for i in bsp::driver::driver_manager().all_device_drivers().iter() { ++ let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() { ++ Err(string) => panic!("Error mapping kernel binary: {}", string), ++ Ok(addr) => addr, ++ }; ++ ++ if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) { ++ panic!("Enabling MMU failed: {}", e); ++ } ++ // Printing will silently fail from here on, because the driver's MMIO is not remapped yet. ++ + // Bring up the drivers needed for printing first. + for i in bsp::driver::driver_manager() + .early_print_device_drivers() @@ -1982,10 +2083,11 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/main.rs 15_virtual_mem_part2_m + { + // Any encountered errors cannot be printed yet, obviously, so just safely park the CPU. + i.init().unwrap_or_else(|_| cpu::wait_forever()); -+ } + } + bsp::driver::driver_manager().post_early_print_device_driver_init(); + // Printing available again from here on. -+ + +- for i in bsp::driver::driver_manager().all_device_drivers().iter() { + // Now bring up the remaining drivers. + for i in bsp::driver::driver_manager() + .non_early_print_device_drivers() @@ -2000,7 +2102,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/main.rs 15_virtual_mem_part2_m // Let device drivers register and enable their handlers with the interrupt controller. 
for i in bsp::driver::driver_manager().all_device_drivers() { -@@ -66,8 +79,8 @@ +@@ -66,8 +84,8 @@ info!("Booting on: {}", bsp::board_name()); @@ -2129,12 +2231,12 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1 + const KIB_RSHIFT: u32 = 10; // log2(1024). + const MIB_RSHIFT: u32 = 20; // log2(1024 * 1024). + -+ info!(" -----------------------------------------------------------------------------------------------------------------"); ++ info!(" -------------------------------------------------------------------------------------------------------------------------------------------"); + info!( -+ " {:^24} {:^24} {:^7} {:^9} {:^35}", ++ " {:^44} {:^30} {:^7} {:^9} {:^35}", + "Virtual", "Physical", "Size", "Attr", "Entity" + ); -+ info!(" -----------------------------------------------------------------------------------------------------------------"); ++ info!(" -------------------------------------------------------------------------------------------------------------------------------------------"); + + for i in self + .inner @@ -2142,10 +2244,10 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1 + .filter(|x| x.is_some()) + .map(|x| x.unwrap()) + { -+ let virt_start = i.virt_start_addr.into_usize(); ++ let virt_start = i.virt_start_addr; + let virt_end_inclusive = virt_start + i.phys_pages.size() - 1; -+ let phys_start = i.phys_pages.start_addr().into_usize(); -+ let phys_end_inclusive = i.phys_pages.end_addr_inclusive().into_usize(); ++ let phys_start = i.phys_pages.start_addr(); ++ let phys_end_inclusive = i.phys_pages.end_addr_inclusive(); + let size = i.phys_pages.size(); + + let (size, unit) = if (size >> MIB_RSHIFT) > 0 { @@ -2173,7 +2275,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1 + }; + + info!( -+ " {:#011X}..{:#011X} --> {:#011X}..{:#011X} | \ ++ " {}..{} --> {}..{} | \ + {: >3} {} | {: <3} {} {: <2} | {}", + virt_start, + virt_end_inclusive, @@ -2190,14 +2292,14 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1 + for k in i.users[1..].iter() { + if let Some(additional_user) = *k { + info!( -+ " | {}", ++ " | {}", + additional_user + ); + } + } + } + -+ info!(" -----------------------------------------------------------------------------------------------------------------"); ++ info!(" -------------------------------------------------------------------------------------------------------------------------------------------"); + } +} + @@ -2241,7 +2343,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/mapping_record.rs 1 diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.rs 15_virtual_mem_part2_mmio_remap/src/memory/mmu/translation_table.rs --- 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.rs +++ 15_virtual_mem_part2_mmio_remap/src/memory/mmu/translation_table.rs -@@ -8,7 +8,104 @@ +@@ -8,7 +8,105 @@ #[path = "../../_arch/aarch64/memory/mmu/translation_table.rs"] mod arch_translation_table; @@ -2253,7 +2355,9 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.r //-------------------------------------------------------------------------------------------------- // Architectural Public Reexports //-------------------------------------------------------------------------------------------------- - pub use arch_translation_table::KernelTranslationTable; +-pub use arch_translation_table::KernelTranslationTable; ++#[cfg(target_arch = 
"aarch64")] ++pub use arch_translation_table::FixedSizeTranslationTable; + +//-------------------------------------------------------------------------------------------------- +// Public Definitions @@ -2271,7 +2375,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.r + /// + /// - Implementor must ensure that this function can run only once or is harmless if invoked + /// multiple times. -+ unsafe fn init(&mut self); ++ fn init(&mut self); + + /// The translation table's base address to be used for programming the MMU. + fn phys_base_address(&self) -> Address; @@ -2317,17 +2421,17 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.r +mod tests { + use super::*; + use crate::bsp; -+ use arch_translation_table::MinSizeKernelTranslationTable; ++ use arch_translation_table::MinSizeTranslationTable; + use interface::TranslationTable; + use test_macros::kernel_test; + -+ /// Sanity checks for the kernel TranslationTable implementation. ++ /// Sanity checks for the TranslationTable implementation. + #[kernel_test] + fn translationtable_implementation_sanity() { -+ // Need to take care that `tables` fits into the stack. -+ let mut tables = MinSizeKernelTranslationTable::new(); ++ // This will occupy a lot of space on the stack. ++ let mut tables = MinSizeTranslationTable::new(); + -+ unsafe { tables.init() }; ++ tables.init(); + + let x = tables.next_mmio_virt_page_slice(0); + assert!(x.is_err()); @@ -2350,7 +2454,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/translation_table.r diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs --- 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs +++ 15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs -@@ -0,0 +1,213 @@ +@@ -0,0 +1,210 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2020-2021 Andre Richter @@ -2361,7 +2465,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 15_virtual + bsp, common, + memory::{Address, AddressType, Physical, Virtual}, +}; -+use core::{convert::From, marker::PhantomData, ops::RangeInclusive}; ++use core::{convert::From, marker::PhantomData}; + +//-------------------------------------------------------------------------------------------------- +// Public Definitions @@ -2474,6 +2578,11 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 15_virtual + self.start + (self.size() - 1) + } + ++ /// Check if an address is contained within this descriptor. ++ pub fn contains(&self, addr: Address) -> bool { ++ (addr >= self.start_addr()) && (addr <= self.end_addr_inclusive()) ++ } ++ + /// Return a non-mutable slice of Pages. + /// + /// # Safety @@ -2482,14 +2591,6 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 15_virtual + pub unsafe fn as_slice(&self) -> &[Page] { + core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages) + } -+ -+ /// Return the inclusive address range of the slice. 
-+ pub fn into_usize_range_inclusive(self) -> RangeInclusive { -+ RangeInclusive::new( -+ self.start_addr().into_usize(), -+ self.end_addr_inclusive().into_usize(), -+ ) -+ } +} + +impl From> for PageSliceDescriptor { @@ -2568,7 +2669,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu/types.rs 15_virtual diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_part2_mmio_remap/src/memory/mmu.rs --- 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs +++ 15_virtual_mem_part2_mmio_remap/src/memory/mmu.rs -@@ -3,29 +3,22 @@ +@@ -3,29 +3,23 @@ // Copyright (c) 2020-2021 Andre Richter //! Memory Management Unit. @@ -2597,6 +2698,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p + memory::{Address, Physical, Virtual}, + synchronization, warn, +}; ++use core::fmt; -//-------------------------------------------------------------------------------------------------- -// Architectural Public Reexports @@ -2606,33 +2708,28 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p //-------------------------------------------------------------------------------------------------- // Public Definitions -@@ -33,16 +26,20 @@ - - /// Memory Management interfaces. - pub mod interface { -+ use super::*; +@@ -45,13 +39,15 @@ /// MMU functions. pub trait MMU { - /// Called by the kernel during early init. Supposed to take the translation tables from the - /// `BSP`-supplied `virt_mem_layout()` and install/activate them for the respective MMU. -+ /// Turns on the MMU. ++ /// Turns on the MMU for the first time and enables data and instruction caching. /// /// # Safety /// -+ /// - Must only be called after the kernel translation tables have been init()'ed. /// - Changes the HW's global state. -- unsafe fn init(&self) -> Result<(), &'static str>; -+ unsafe fn enable( +- unsafe fn enable_mmu_and_caching(&self) -> Result<(), MMUEnableError>; ++ unsafe fn enable_mmu_and_caching( + &self, -+ kernel_table_phys_base_addr: Address, -+ ) -> Result<(), &'static str>; - } - } ++ phys_tables_base_addr: Address, ++ ) -> Result<(), MMUEnableError>; -@@ -52,55 +49,35 @@ - /// Describes the size of an address space. - pub struct AddressSpaceSize; + /// Returns true if the MMU is enabled, false otherwise. + fn is_enabled(&self) -> bool; +@@ -64,55 +60,43 @@ + /// Describes properties of an address space. + pub struct AddressSpace; -/// Architecture agnostic translation types. -#[allow(missing_docs)] @@ -2665,8 +2762,14 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p - pub mem_attributes: MemAttributes, - pub acc_perms: AccessPermissions, - pub execute_never: bool, --} -- ++/// Intended to be implemented for [`AddressSpace`]. ++pub trait AssociatedTranslationTable { ++ /// A translation table whose address range is: ++ /// ++ /// [0, AS_SIZE - 1] ++ type TableStartFromBottom; + } + -/// Architecture agnostic descriptor for a memory range. -#[allow(missing_docs)] -pub struct TranslationDescriptor { @@ -2675,11 +2778,6 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p - pub physical_range_translation: Translation, - pub attribute_fields: AttributeFields, -} -- --/// Type for expressing the kernel's virtual memory layout. --pub struct KernelVirtualLayout { -- /// The last (inclusive) address of the address space. 
-- max_virt_addr_inclusive: usize, +//-------------------------------------------------------------------------------------------------- +// Private Code +//-------------------------------------------------------------------------------------------------- @@ -2701,9 +2799,13 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p + phys_pages: &PageSliceDescriptor, + attr: &AttributeFields, +) -> Result<(), &'static str> { -+ arch_mmu::kernel_translation_tables() ++ bsp::memory::mmu::kernel_translation_tables() + .write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr))?; -+ + +-/// Type for expressing the kernel's virtual memory layout. +-pub struct KernelVirtualLayout { +- /// The last (inclusive) address of the address space. +- max_virt_addr_inclusive: usize, + if let Err(x) = mapping_record::kernel_add(name, virt_pages, phys_pages, attr) { + warn!("{}", x); + } @@ -2714,7 +2816,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p } //-------------------------------------------------------------------------------------------------- -@@ -111,6 +88,9 @@ +@@ -132,6 +116,9 @@ /// The granule's size. pub const SIZE: usize = Self::size_checked(); @@ -2724,7 +2826,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p /// The granule's shift, aka log2(size). pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; -@@ -142,110 +122,89 @@ +@@ -159,110 +146,103 @@ } } @@ -2848,7 +2950,7 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p + phys_pages: &PageSliceDescriptor, + attr: &AttributeFields, +) -> Result<(), &'static str> { -+ let is_mmio = arch_mmu::kernel_translation_tables() ++ let is_mmio = bsp::memory::mmu::kernel_translation_tables() + .read(|tables| tables.is_virt_page_slice_mmio(virt_pages)); + if is_mmio { + return Err("Attempt to manually map into MMIO region"); @@ -2881,8 +2983,9 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p + addr + // Otherwise, allocate a new virtual page slice and map it. + } else { -+ let virt_pages: PageSliceDescriptor = arch_mmu::kernel_translation_tables() -+ .write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?; ++ let virt_pages: PageSliceDescriptor = ++ bsp::memory::mmu::kernel_translation_tables() ++ .write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?; + + kernel_map_pages_at_unchecked( + name, @@ -2901,19 +3004,32 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p + Ok(virt_addr + offset_into_start_page) +} + -+/// Map the kernel's binary and enable the MMU. ++/// Map the kernel's binary. Returns the translation table's base address. +/// +/// # Safety +/// -+/// - Crucial function during kernel init. Changes the the complete memory view of the processor. -+pub unsafe fn kernel_map_binary_and_enable_mmu() -> Result<(), &'static str> { -+ let phys_base_addr = arch_mmu::kernel_translation_tables().write(|tables| { -+ tables.init(); -+ tables.phys_base_address() -+ }); ++/// - See [`bsp::memory::mmu::kernel_map_binary()`]. 
++pub unsafe fn kernel_map_binary() -> Result, &'static str> { ++ let phys_kernel_tables_base_addr = ++ bsp::memory::mmu::kernel_translation_tables().write(|tables| { ++ tables.init(); ++ tables.phys_base_address() ++ }); + + bsp::memory::mmu::kernel_map_binary()?; -+ arch_mmu::mmu().enable(phys_base_addr) ++ ++ Ok(phys_kernel_tables_base_addr) ++} ++ ++/// Enable the MMU and data + instruction caching. ++/// ++/// # Safety ++/// ++/// - Crucial function during kernel init. Changes the the complete memory view of the processor. ++pub unsafe fn enable_mmu_and_caching( ++ phys_tables_base_addr: Address, ++) -> Result<(), MMUEnableError> { ++ arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr) +} + +/// Human-readable print of all recorded kernel mappings. @@ -2924,13 +3040,17 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory/mmu.rs 15_virtual_mem_p diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory.rs 15_virtual_mem_part2_mmio_remap/src/memory.rs --- 14_exceptions_part2_peripheral_IRQs/src/memory.rs +++ 15_virtual_mem_part2_mmio_remap/src/memory.rs -@@ -6,12 +6,85 @@ +@@ -6,12 +6,136 @@ pub mod mmu; -use core::ops::RangeInclusive; +use crate::common; -+use core::{marker::PhantomData, ops::RangeInclusive}; ++use core::{ ++ fmt, ++ marker::PhantomData, ++ ops::{AddAssign, RangeInclusive, SubAssign}, ++}; + +//-------------------------------------------------------------------------------------------------- +// Public Definitions @@ -2997,6 +3117,15 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory.rs 15_virtual_mem_part2 + } +} + ++impl AddAssign for Address { ++ fn add_assign(&mut self, other: Self) { ++ *self = Self { ++ value: self.value + other.into_usize(), ++ _address_type: PhantomData, ++ }; ++ } ++} ++ +impl core::ops::Sub for Address { + type Output = Self; + @@ -3007,6 +3136,44 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/src/memory.rs 15_virtual_mem_part2 + } + } +} ++ ++impl SubAssign for Address { ++ fn sub_assign(&mut self, other: Self) { ++ *self = Self { ++ value: self.value - other.into_usize(), ++ _address_type: PhantomData, ++ }; ++ } ++} ++ ++impl fmt::Display for Address { ++ // Don't expect to see physical addresses greater than 40 bit. ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ let q3: u8 = ((self.value >> 32) & 0xff) as u8; ++ let q2: u16 = ((self.value >> 16) & 0xffff) as u16; ++ let q1: u16 = (self.value & 0xffff) as u16; ++ ++ write!(f, "0x")?; ++ write!(f, "{:02x}_", q3)?; ++ write!(f, "{:04x}_", q2)?; ++ write!(f, "{:04x}", q1) ++ } ++} ++ ++impl fmt::Display for Address { ++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ++ let q4: u16 = ((self.value >> 48) & 0xffff) as u16; ++ let q3: u16 = ((self.value >> 32) & 0xffff) as u16; ++ let q2: u16 = ((self.value >> 16) & 0xffff) as u16; ++ let q1: u16 = (self.value & 0xffff) as u16; ++ ++ write!(f, "0x")?; ++ write!(f, "{:04x}_", q4)?; ++ write!(f, "{:04x}_", q3)?; ++ write!(f, "{:04x}_", q2)?; ++ write!(f, "{:04x}", q1) ++ } ++} + /// Zero out an inclusive memory range. 
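
The `Address` machinery added to `memory.rs` above combines zero-sized marker types, so that physical and virtual addresses cannot be mixed up at compile time, with a `Display` impl that groups the hex digits for readability. A reduced, host-runnable sketch of the same idea (simplified, not the kernel's exact code):

```rust
use core::fmt;
use core::marker::PhantomData;

// Zero-sized marker types. Mixing them up becomes a type error.
enum Physical {}
enum Virtual {}

struct Address<ATYPE> {
    value: usize,
    _marker: PhantomData<ATYPE>,
}

impl<ATYPE> Address<ATYPE> {
    const fn new(value: usize) -> Self {
        Self {
            value,
            _marker: PhantomData,
        }
    }
}

// Group the hex digits in 16-bit chunks, similar to the formatter in the diff above.
impl<ATYPE> fmt::Display for Address<ATYPE> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let q2 = (self.value >> 16) & 0xffff;
        let q1 = self.value & 0xffff;
        write!(f, "0x{:04x}_{:04x}", q2, q1)
    }
}

// A function that only accepts physical addresses.
fn program_ttbr0(base: Address<Physical>) {
    println!("TTBR0 would be set to {}", base);
}

fn main() {
    let phys = Address::<Physical>::new(0x8_0000);
    let virt = Address::<Virtual>::new(0xF000_0000);

    program_ttbr0(phys);
    // program_ttbr0(virt); // Does not compile: expected `Address<Physical>`.
    println!("A virtual address: {}", virt);
}
```
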
/// @@ -3024,17 +3191,25 @@ diff -uNr 14_exceptions_part2_peripheral_IRQs/tests/02_exception_sync_page_fault exception::handling_init(); bsp::console::qemu_bring_up_console(); -@@ -29,10 +29,22 @@ +@@ -29,10 +29,30 @@ println!("Testing synchronous exception handling by causing a page fault"); println!("-------------------------------------------------------------------\n"); -- if let Err(string) = memory::mmu::mmu().init() { +- if let Err(string) = memory::mmu::mmu().enable_mmu_and_caching() { - println!("MMU: {}", string); -+ if let Err(string) = memory::mmu::kernel_map_binary_and_enable_mmu() { -+ println!("Enabling MMU failed: {}", string); ++ let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() { ++ Err(string) => { ++ println!("Error mapping kernel binary: {}", string); ++ cpu::qemu_exit_failure() ++ } ++ Ok(addr) => addr, ++ }; ++ ++ if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) { ++ println!("Enabling MMU failed: {}", e); cpu::qemu_exit_failure() } -+ // Printing will silently fail fail from here on, because the driver's MMIO is not remapped yet. ++ // Printing will silently fail from here on, because the driver's MMIO is not remapped yet. + + // Bring up the drivers needed for printing first. + for i in bsp::driver::driver_manager() diff --git a/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu.rs b/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu.rs index ebf454ce..22311516 100644 --- a/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu.rs +++ b/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu.rs @@ -15,12 +15,9 @@ use crate::{ bsp, memory, - memory::{ - mmu::{translation_table::KernelTranslationTable, TranslationGranule}, - Address, Physical, - }, - synchronization::InitStateLock, + memory::{mmu::TranslationGranule, Address, Physical}, }; +use core::intrinsics::unlikely; use cortex_a::{barrier, regs::*}; //-------------------------------------------------------------------------------------------------- @@ -37,15 +34,6 @@ struct MemoryManagementUnit; pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; -/// The min supported address space size. -pub const MIN_ADDR_SPACE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB - -/// The max supported address space size. -pub const MAX_ADDR_SPACE_SIZE: usize = 8 * 1024 * 1024 * 1024; // 8 GiB - -/// The supported address space size granule. -pub type AddrSpaceSizeGranule = Granule512MiB; - /// Constants for indexing the MAIR_EL1. #[allow(dead_code)] pub mod mair { @@ -57,20 +45,24 @@ pub mod mair { // Global instances //-------------------------------------------------------------------------------------------------- -/// The kernel translation tables. -/// -/// # Safety -/// -/// - Supposed to land in `.bss`. Therefore, ensure that all initial member values boil down to "0". -static KERNEL_TABLES: InitStateLock = - InitStateLock::new(KernelTranslationTable::new()); - static MMU: MemoryManagementUnit = MemoryManagementUnit; //-------------------------------------------------------------------------------------------------- // Private Code //-------------------------------------------------------------------------------------------------- +impl memory::mmu::AddressSpace { + /// Checks for architectural restrictions. + pub const fn arch_address_space_size_sanity_checks() { + // Size must be at least one full 512 MiB table. 
+ assert!((AS_SIZE % Granule512MiB::SIZE) == 0); + + // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8 + // version. + assert!(AS_SIZE <= (1 << 48)); + } +} + impl MemoryManagementUnit { /// Setup function for the MAIR_EL1 register. fn set_up_mair(&self) { @@ -87,19 +79,19 @@ impl MemoryManagementUnit { /// Configure various settings of stage 1 of the EL1 translation regime. fn configure_translation_control(&self) { - let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange); - let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpaceSize::SHIFT) as u64; + let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64; TCR_EL1.write( - TCR_EL1::TBI0::Ignored - + TCR_EL1::IPS.val(ips) - + TCR_EL1::EPD1::DisableTTBR1Walks + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_40 + TCR_EL1::TG0::KiB_64 + TCR_EL1::SH0::Inner + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::T0SZ.val(t0sz), + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(t0sz) + + TCR_EL1::EPD1::DisableTTBR1Walks, ); } } @@ -108,11 +100,6 @@ impl MemoryManagementUnit { // Public Code //-------------------------------------------------------------------------------------------------- -/// Return a guarded reference to the kernel's translation tables. -pub fn kernel_translation_tables() -> &'static InitStateLock { - &KERNEL_TABLES -} - /// Return a reference to the MMU instance. pub fn mmu() -> &'static impl memory::mmu::interface::MMU { &MMU @@ -121,22 +108,29 @@ pub fn mmu() -> &'static impl memory::mmu::interface::MMU { //------------------------------------------------------------------------------ // OS Interface Code //------------------------------------------------------------------------------ +use memory::mmu::MMUEnableError; impl memory::mmu::interface::MMU for MemoryManagementUnit { - unsafe fn enable( + unsafe fn enable_mmu_and_caching( &self, - kernel_table_phys_base_addr: Address, - ) -> Result<(), &'static str> { - // Fail early if translation granule is not supported. Both RPis support it, though. - if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { - return Err("Translation granule not supported in HW"); + phys_tables_base_addr: Address, + ) -> Result<(), MMUEnableError> { + if unlikely(self.is_enabled()) { + return Err(MMUEnableError::AlreadyEnabled); + } + + // Fail early if translation granule is not supported. + if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) { + return Err(MMUEnableError::Other( + "Translation granule not supported in HW", + )); } // Prepare the memory attribute indirection register. self.set_up_mair(); // Set the "Translation Table Base Register". - TTBR0_EL1.set_baddr(kernel_table_phys_base_addr.into_usize() as u64); + TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64); self.configure_translation_control(); @@ -153,23 +147,9 @@ impl memory::mmu::interface::MMU for MemoryManagementUnit { Ok(()) } -} - -//-------------------------------------------------------------------------------------------------- -// Testing -//-------------------------------------------------------------------------------------------------- - -#[cfg(test)] -mod tests { - use super::*; - use test_macros::kernel_test; - - /// Check if KERNEL_TABLES is in .bss. 
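
The assertions above live in a `const fn`, which is what turns a misconfigured BSP address space into a build failure instead of a boot failure: the checks run when they feed an associated `const`. A minimal host-side sketch of that pattern (the granule constant and names are illustrative, and a 64-bit host is assumed):

```rust
// Minimal sketch: compile-time validation of an address space size via const evaluation.
struct AddressSpace<const AS_SIZE: usize>;

impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
    /// Evaluated at compile time because it initializes an associated const.
    pub const SIZE: usize = Self::size_checked();

    pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;

    const fn size_checked() -> usize {
        const GRANULE_512_MIB: usize = 512 * 1024 * 1024;

        // A panic in const context is a compile error at the use site.
        assert!(AS_SIZE.is_power_of_two());
        assert!(AS_SIZE % GRANULE_512_MIB == 0);
        assert!(AS_SIZE <= (1 << 48)); // 48 bit max, as on ARMv8.

        AS_SIZE
    }
}

// 8 GiB: passes all checks.
type KernelVirtAddrSpace = AddressSpace<{ 8 * 1024 * 1024 * 1024 }>;

// type Broken = AddressSpace<{ 3 * 1024 * 1024 }>; // Referencing Broken::SIZE would fail the build.

fn main() {
    println!(
        "size = {} bytes, shift = {}",
        KernelVirtAddrSpace::SIZE,
        KernelVirtAddrSpace::SIZE_SHIFT
    );
}
```
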
- #[kernel_test] - fn kernel_tables_in_bss() { - let bss_range = bsp::memory::bss_range_inclusive(); - let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64; - assert!(bss_range.contains(&kernel_tables_addr)); + #[inline(always)] + fn is_enabled(&self) -> bool { + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) } } diff --git a/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu/translation_table.rs b/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu/translation_table.rs index f682d6a4..15784a1b 100644 --- a/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu/translation_table.rs +++ b/15_virtual_mem_part2_mmio_remap/src/_arch/aarch64/memory/mmu/translation_table.rs @@ -20,7 +20,7 @@ use crate::{ arch_mmu::{Granule512MiB, Granule64KiB}, AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor, }, - Address, AddressType, Physical, Virtual, + Address, Physical, Virtual, }, }; use core::convert; @@ -90,8 +90,8 @@ register_bitfields! {u64, AttrIndx OFFSET(2) NUMBITS(3) [], TYPE OFFSET(1) NUMBITS(1) [ - Block = 0, - Table = 1 + Reserved_Invalid = 0, + Page = 1 ], VALID OFFSET(0) NUMBITS(1) [ @@ -119,19 +119,16 @@ struct PageDescriptor { value: u64, } -trait BaseAddr { - fn phys_base_addr(&self) -> Address; +trait StartAddr { + fn phys_start_addr(&self) -> Address; } -const NUM_LVL2_TABLES: usize = - bsp::memory::mmu::KernelVirtAddrSpaceSize::SIZE >> Granule512MiB::SHIFT; - //-------------------------------------------------------------------------------------------------- // Public Definitions //-------------------------------------------------------------------------------------------------- /// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB -/// aligned, hence the "reverse" order of appearance. +/// aligned, so the lvl3 is put first. #[repr(C)] #[repr(align(65536))] pub struct FixedSizeTranslationTable { @@ -148,16 +145,13 @@ pub struct FixedSizeTranslationTable { initialized: bool, } -/// A translation table type for the kernel space. -pub type KernelTranslationTable = FixedSizeTranslationTable; - //-------------------------------------------------------------------------------------------------- // Private Code //-------------------------------------------------------------------------------------------------- -impl BaseAddr for [T; N] { - fn phys_base_addr(&self) -> Address { - // The binary is still identity mapped, so we don't need to convert here. +// The binary is still identity mapped, so we don't need to convert here. +impl StartAddr for [T; N] { + fn phys_start_addr(&self) -> Address { Address::new(self as *const _ as usize) } } @@ -171,14 +165,14 @@ impl TableDescriptor { } /// Create an instance pointing to the supplied address. - pub fn from_next_lvl_table_addr(next_lvl_table_addr: usize) -> Self { + pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address) -> Self { let val = InMemoryRegister::::new(0); - let shifted = next_lvl_table_addr >> Granule64KiB::SHIFT; + let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT; val.write( - STAGE1_TABLE_DESCRIPTOR::VALID::True + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + STAGE1_TABLE_DESCRIPTOR::TYPE::Table - + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64), + + STAGE1_TABLE_DESCRIPTOR::VALID::True, ); TableDescriptor { value: val.get() } @@ -232,18 +226,18 @@ impl PageDescriptor { /// Create an instance. 
pub fn from_output_addr( - output_addr: *const Page, + phys_output_addr: *const Page, attribute_fields: &AttributeFields, ) -> Self { let val = InMemoryRegister::::new(0); - let shifted = output_addr as u64 >> Granule64KiB::SHIFT; + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; val.write( - STAGE1_PAGE_DESCRIPTOR::VALID::True + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + STAGE1_PAGE_DESCRIPTOR::AF::True - + attribute_fields.clone().into() - + STAGE1_PAGE_DESCRIPTOR::TYPE::Table - + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted), + + STAGE1_PAGE_DESCRIPTOR::TYPE::Page + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + attribute_fields.clone().into(), ); Self { value: val.get() } @@ -260,6 +254,14 @@ impl PageDescriptor { // Public Code //-------------------------------------------------------------------------------------------------- +impl memory::mmu::AssociatedTranslationTable + for memory::mmu::AddressSpace +where + [u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized, +{ + type TableStartFromBottom = FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }>; +} + impl FixedSizeTranslationTable { // Reserve the last 256 MiB of the address space for MMIO mappings. const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1; @@ -269,8 +271,9 @@ impl FixedSizeTranslationTable { #[allow(clippy::assertions_on_constants)] pub const fn new() -> Self { assert!(bsp::memory::mmu::KernelGranule::SIZE == Granule64KiB::SIZE); + + // Can't have a zero-sized address space. assert!(NUM_TABLES > 0); - assert!((bsp::memory::mmu::KernelVirtAddrSpaceSize::SIZE % Granule512MiB::SIZE) == 0); Self { lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], @@ -282,7 +285,7 @@ impl FixedSizeTranslationTable { /// The start address of the table's MMIO range. #[inline(always)] - const fn mmio_start_addr(&self) -> Address { + fn mmio_start_addr(&self) -> Address { Address::new( (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) | (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT), @@ -291,7 +294,7 @@ impl FixedSizeTranslationTable { /// The inclusive end address of the table's MMIO range. #[inline(always)] - const fn mmio_end_addr_inclusive(&self) -> Address { + fn mmio_end_addr_inclusive(&self) -> Address { Address::new( (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) | (8191 << Granule64KiB::SHIFT) @@ -301,12 +304,13 @@ impl FixedSizeTranslationTable { /// Helper to calculate the lvl2 and lvl3 indices from an address. #[inline(always)] - fn lvl2_lvl3_index_from( + fn lvl2_lvl3_index_from( &self, - addr: *const Page, + addr: *const Page, ) -> Result<(usize, usize), &'static str> { - let lvl2_index = addr as usize >> Granule512MiB::SHIFT; - let lvl3_index = (addr as usize & Granule512MiB::MASK) >> Granule64KiB::SHIFT; + let addr = addr as usize; + let lvl2_index = addr >> Granule512MiB::SHIFT; + let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT; if lvl2_index > (NUM_TABLES - 1) { return Err("Virtual page is out of bounds of translation table"); @@ -334,16 +338,15 @@ impl FixedSizeTranslationTable { impl memory::mmu::translation_table::interface::TranslationTable for FixedSizeTranslationTable { - unsafe fn init(&mut self) { + fn init(&mut self) { if self.initialized { return; } // Populate the l2 entries. 
for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() { - let desc = TableDescriptor::from_next_lvl_table_addr( - self.lvl3[lvl2_nr].phys_base_addr().into_usize(), - ); + let desc = + TableDescriptor::from_next_lvl_table_addr(self.lvl3[lvl2_nr].phys_start_addr()); *lvl2_entry = desc; } @@ -352,7 +355,7 @@ impl memory::mmu::translation_table::interface::Transla } fn phys_base_address(&self) -> Address { - self.lvl2.phys_base_addr() + self.lvl2.phys_start_addr() } unsafe fn map_pages_at( @@ -366,15 +369,15 @@ impl memory::mmu::translation_table::interface::Transla let p = phys_pages.as_slice(); let v = virt_pages.as_slice(); - if p.len() != v.len() { - return Err("Tried to map page slices with unequal sizes"); - } - // No work to do for empty slices. - if p.is_empty() { + if v.is_empty() { return Ok(()); } + if v.len() != p.len() { + return Err("Tried to map page slices with unequal sizes"); + } + if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page() { return Err("Tried to map outside of physical address space"); } @@ -406,14 +409,13 @@ impl memory::mmu::translation_table::interface::Transla return Err("Not enough MMIO space left"); } - let addr = (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) - | (self.cur_l3_mmio_index << Granule64KiB::SHIFT); + let addr = Address::new( + (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT) + | (self.cur_l3_mmio_index << Granule64KiB::SHIFT), + ); self.cur_l3_mmio_index += num_pages; - Ok(PageSliceDescriptor::from_addr( - Address::new(addr), - num_pages, - )) + Ok(PageSliceDescriptor::from_addr(addr, num_pages)) } fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor) -> bool { @@ -435,7 +437,7 @@ impl memory::mmu::translation_table::interface::Transla //-------------------------------------------------------------------------------------------------- #[cfg(test)] -pub type MinSizeKernelTranslationTable = FixedSizeTranslationTable<1>; +pub type MinSizeTranslationTable = FixedSizeTranslationTable<1>; #[cfg(test)] mod tests { diff --git a/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/console.rs b/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/console.rs index f63a9e9b..abf8f89c 100644 --- a/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/console.rs +++ b/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/console.rs @@ -5,7 +5,7 @@ //! BSP console facilities. use super::memory; -use crate::{bsp::device_driver, console, cpu}; +use crate::{bsp::device_driver, console, cpu, driver}; use core::fmt; //-------------------------------------------------------------------------------------------------- @@ -23,7 +23,7 @@ use core::fmt; /// /// - Use only for printing during a panic. 
pub unsafe fn panic_console_out() -> impl fmt::Write {
- use crate::driver::interface::DeviceDriver;
+ use driver::interface::DeviceDriver;
 let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize());
 let mut panic_uart =
diff --git a/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs b/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs
index 76b93e47..842384de 100644
--- a/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs
+++ b/15_virtual_mem_part2_mmio_remap/src/bsp/raspberrypi/memory/mmu.rs
@@ -7,15 +7,23 @@
 use crate::{
 common,
 memory::{
- mmu as kernel_mmu,
+ mmu as generic_mmu,
 mmu::{
- AccessPermissions, AddressSpaceSize, AttributeFields, MemAttributes, Page,
- PageSliceDescriptor, TranslationGranule,
+ AccessPermissions, AddressSpace, AssociatedTranslationTable, AttributeFields,
+ MemAttributes, Page, PageSliceDescriptor, TranslationGranule,
 },
 Physical, Virtual,
 },
+ synchronization::InitStateLock,
 };

+//--------------------------------------------------------------------------------------------------
+// Private Definitions
+//--------------------------------------------------------------------------------------------------
+
+type KernelTranslationTable =
+ <KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromBottom;
+
 //--------------------------------------------------------------------------------------------------
 // Public Definitions
 //--------------------------------------------------------------------------------------------------
@@ -24,8 +32,21 @@
 /// derive respective data structures and their sizes. For example, the `crate::memory::mmu::Page`.
 pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;

-/// The address space size chosen by this BSP.
-pub type KernelVirtAddrSpaceSize = AddressSpaceSize<{ 8 * 1024 * 1024 * 1024 }>;
+/// The kernel's virtual address space defined by this BSP.
+pub type KernelVirtAddrSpace = AddressSpace<{ 8 * 1024 * 1024 * 1024 }>;
+
+//--------------------------------------------------------------------------------------------------
+// Global instances
+//--------------------------------------------------------------------------------------------------
+
+/// The kernel translation tables.
+///
+/// It is mandatory that InitStateLock is transparent.
+///
+/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
+/// There is a unit test that checks this property.
+static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
+ InitStateLock::new(KernelTranslationTable::new());

 //--------------------------------------------------------------------------------------------------
 // Private Code
@@ -81,6 +102,11 @@
 // Public Code
 //--------------------------------------------------------------------------------------------------

+/// Return a reference to the kernel's translation tables.
+pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTable> {
+ &KERNEL_TABLES
+}
+
 /// Pointer to the last page of the physical address space.
 pub fn phys_addr_space_end_page() -> *const Page<Physical> {
 common::align_down(
@@ -95,7 +121,7 @@
 ///
 /// - Any miscalculation or attribute error will likely be fatal. Needs careful manual checking.
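
The doc comment on `KERNEL_TABLES` above requires the lock wrapper to be size-transparent. The shape of such a check is a plain `size_of` comparison, sketched below with a hypothetical `UnsafeCell`-based wrapper; this is not the kernel's `InitStateLock`.

```rust
use std::cell::UnsafeCell;
use std::mem::size_of;

/// Hypothetical stand-in for a kernel lock that must not change its content's size.
#[repr(transparent)]
struct TransparentLock<T> {
    data: UnsafeCell<T>,
}

impl<T> TransparentLock<T> {
    const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
        }
    }

    /// Read access for the sketch; a real lock would synchronize here.
    fn read<R>(&self, f: impl FnOnce(&T) -> R) -> R {
        f(unsafe { &*self.data.get() })
    }
}

/// A stand-in for the translation table struct.
#[repr(C)]
struct DummyTables {
    lvl2: [u64; 8],
}

fn main() {
    let guarded = TransparentLock::new(DummyTables { lvl2: [0; 8] });

    // The check the doc comment above alludes to: wrapping must be size-neutral.
    assert_eq!(
        size_of::<TransparentLock<DummyTables>>(),
        size_of::<DummyTables>()
    );

    let num_lvl2 = guarded.read(|t| t.lvl2.len());
    println!("lock wrapper is size-transparent, {} lvl2 entries", num_lvl2);
}
```
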
pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { - kernel_mmu::kernel_map_pages_at( + generic_mmu::kernel_map_pages_at( "Kernel boot-core stack", &virt_stack_page_desc(), &phys_stack_page_desc(), @@ -106,7 +132,7 @@ pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { }, )?; - kernel_mmu::kernel_map_pages_at( + generic_mmu::kernel_map_pages_at( "Kernel code and RO data", &virt_ro_page_desc(), &phys_ro_page_desc(), @@ -117,7 +143,7 @@ pub unsafe fn kernel_map_binary() -> Result<(), &'static str> { }, )?; - kernel_mmu::kernel_map_pages_at( + generic_mmu::kernel_map_pages_at( "Kernel data and bss", &virt_data_page_desc(), &phys_data_page_desc(), @@ -157,18 +183,27 @@ mod tests { #[kernel_test] fn virt_mem_layout_has_no_overlaps() { let layout = [ - virt_stack_page_desc().into_usize_range_inclusive(), - virt_ro_page_desc().into_usize_range_inclusive(), - virt_data_page_desc().into_usize_range_inclusive(), + virt_stack_page_desc(), + virt_ro_page_desc(), + virt_data_page_desc(), ]; for (i, first_range) in layout.iter().enumerate() { for second_range in layout.iter().skip(i + 1) { - assert!(!first_range.contains(second_range.start())); - assert!(!first_range.contains(second_range.end())); - assert!(!second_range.contains(first_range.start())); - assert!(!second_range.contains(first_range.end())); + assert!(!first_range.contains(second_range.start_addr())); + assert!(!first_range.contains(second_range.end_addr_inclusive())); + assert!(!second_range.contains(first_range.start_addr())); + assert!(!second_range.contains(first_range.end_addr_inclusive())); } } } + + /// Check if KERNEL_TABLES is in .bss. + #[kernel_test] + fn kernel_tables_in_bss() { + let bss_range = super::super::bss_range_inclusive(); + let kernel_tables_addr = &KERNEL_TABLES as *const _ as usize as *mut u64; + + assert!(bss_range.contains(&kernel_tables_addr)); + } } diff --git a/15_virtual_mem_part2_mmio_remap/src/lib.rs b/15_virtual_mem_part2_mmio_remap/src/lib.rs index dad5e903..30f684af 100644 --- a/15_virtual_mem_part2_mmio_remap/src/lib.rs +++ b/15_virtual_mem_part2_mmio_remap/src/lib.rs @@ -112,6 +112,7 @@ #![allow(clippy::clippy::upper_case_acronyms)] #![allow(incomplete_features)] #![feature(asm)] +#![feature(const_evaluatable_checked)] #![feature(const_fn)] #![feature(const_fn_fn_ptr_basics)] #![feature(const_generics)] diff --git a/15_virtual_mem_part2_mmio_remap/src/main.rs b/15_virtual_mem_part2_mmio_remap/src/main.rs index 11bb6902..40d3ed41 100644 --- a/15_virtual_mem_part2_mmio_remap/src/main.rs +++ b/15_virtual_mem_part2_mmio_remap/src/main.rs @@ -19,7 +19,7 @@ use libkernel::{bsp, cpu, driver, exception, info, memory, state, time, warn}; /// /// - Only a single core must be active and running this function. /// - The init calls in this function must appear in the correct order: -/// - Virtual memory must be activated before the device drivers. +/// - Caching must be activated before the device drivers. /// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device /// drivers (which currently employ IRQSafeNullLocks instead of spinlocks), will fail to /// work on the RPi SoCs. @@ -29,10 +29,15 @@ unsafe fn kernel_init() -> ! 
{ exception::handling_init(); - if let Err(string) = memory::mmu::kernel_map_binary_and_enable_mmu() { - panic!("Enabling MMU failed: {}", string); + let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() { + Err(string) => panic!("Error mapping kernel binary: {}", string), + Ok(addr) => addr, + }; + + if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) { + panic!("Enabling MMU failed: {}", e); } - // Printing will silently fail fail from here on, because the driver's MMIO is not remapped yet. + // Printing will silently fail from here on, because the driver's MMIO is not remapped yet. // Bring up the drivers needed for printing first. for i in bsp::driver::driver_manager() diff --git a/15_virtual_mem_part2_mmio_remap/src/memory.rs b/15_virtual_mem_part2_mmio_remap/src/memory.rs index 1493b1a9..515731eb 100644 --- a/15_virtual_mem_part2_mmio_remap/src/memory.rs +++ b/15_virtual_mem_part2_mmio_remap/src/memory.rs @@ -7,7 +7,11 @@ pub mod mmu; use crate::common; -use core::{marker::PhantomData, ops::RangeInclusive}; +use core::{ + fmt, + marker::PhantomData, + ops::{AddAssign, RangeInclusive, SubAssign}, +}; //-------------------------------------------------------------------------------------------------- // Public Definitions @@ -74,6 +78,15 @@ impl core::ops::Add for Address { } } +impl AddAssign for Address { + fn add_assign(&mut self, other: Self) { + *self = Self { + value: self.value + other.into_usize(), + _address_type: PhantomData, + }; + } +} + impl core::ops::Sub for Address { type Output = Self; @@ -85,6 +98,44 @@ impl core::ops::Sub for Address { } } +impl SubAssign for Address { + fn sub_assign(&mut self, other: Self) { + *self = Self { + value: self.value - other.into_usize(), + _address_type: PhantomData, + }; + } +} + +impl fmt::Display for Address { + // Don't expect to see physical addresses greater than 40 bit. + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let q3: u8 = ((self.value >> 32) & 0xff) as u8; + let q2: u16 = ((self.value >> 16) & 0xffff) as u16; + let q1: u16 = (self.value & 0xffff) as u16; + + write!(f, "0x")?; + write!(f, "{:02x}_", q3)?; + write!(f, "{:04x}_", q2)?; + write!(f, "{:04x}", q1) + } +} + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let q4: u16 = ((self.value >> 48) & 0xffff) as u16; + let q3: u16 = ((self.value >> 32) & 0xffff) as u16; + let q2: u16 = ((self.value >> 16) & 0xffff) as u16; + let q1: u16 = (self.value & 0xffff) as u16; + + write!(f, "0x")?; + write!(f, "{:04x}_", q4)?; + write!(f, "{:04x}_", q3)?; + write!(f, "{:04x}_", q2)?; + write!(f, "{:04x}", q1) + } +} + /// Zero out an inclusive memory range. /// /// # Safety diff --git a/15_virtual_mem_part2_mmio_remap/src/memory/mmu.rs b/15_virtual_mem_part2_mmio_remap/src/memory/mmu.rs index b0205593..7a0115af 100644 --- a/15_virtual_mem_part2_mmio_remap/src/memory/mmu.rs +++ b/15_virtual_mem_part2_mmio_remap/src/memory/mmu.rs @@ -17,6 +17,7 @@ use crate::{ memory::{Address, Physical, Virtual}, synchronization, warn, }; +use core::fmt; pub use types::*; @@ -24,30 +25,48 @@ pub use types::*; // Public Definitions //-------------------------------------------------------------------------------------------------- +/// MMU enable errors variants. +#[allow(missing_docs)] +#[derive(Debug)] +pub enum MMUEnableError { + AlreadyEnabled, + Other(&'static str), +} + /// Memory Management interfaces. pub mod interface { use super::*; /// MMU functions. 
     pub trait MMU {
-        /// Turns on the MMU.
+        /// Turns on the MMU for the first time and enables data and instruction caching.
         ///
         /// # Safety
         ///
-        /// - Must only be called after the kernel translation tables have been init()'ed.
         /// - Changes the HW's global state.
-        unsafe fn enable(
+        unsafe fn enable_mmu_and_caching(
             &self,
-            kernel_table_phys_base_addr: Address<Physical>,
-        ) -> Result<(), &'static str>;
+            phys_tables_base_addr: Address<Physical>,
+        ) -> Result<(), MMUEnableError>;
+
+        /// Returns true if the MMU is enabled, false otherwise.
+        fn is_enabled(&self) -> bool;
     }
 }
 
 /// Describes the characteristics of a translation granule.
 pub struct TranslationGranule<const GRANULE_SIZE: usize>;
 
-/// Describes the size of an address space.
-pub struct AddressSpaceSize<const AS_SIZE: usize>;
+/// Describes properties of an address space.
+pub struct AddressSpace<const AS_SIZE: usize>;
+
+/// Intended to be implemented for [`AddressSpace`].
+pub trait AssociatedTranslationTable {
+    /// A translation table whose address range is:
+    ///
+    /// [0, AS_SIZE - 1]
+    type TableStartFromBottom;
+}
 
 //--------------------------------------------------------------------------------------------------
 // Private Code
 //--------------------------------------------------------------------------------------------------
@@ -70,7 +89,7 @@ unsafe fn kernel_map_pages_at_unchecked(
     phys_pages: &PageSliceDescriptor<Physical>,
     attr: &AttributeFields,
 ) -> Result<(), &'static str> {
-    arch_mmu::kernel_translation_tables()
+    bsp::memory::mmu::kernel_translation_tables()
         .write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr))?;
 
     if let Err(x) = mapping_record::kernel_add(name, virt_pages, phys_pages, attr) {
@@ -84,6 +103,15 @@ unsafe fn kernel_map_pages_at_unchecked(
 // Public Code
 //--------------------------------------------------------------------------------------------------
 
+impl fmt::Display for MMUEnableError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MMUEnableError::AlreadyEnabled => write!(f, "MMU is already enabled"),
+            MMUEnableError::Other(x) => write!(f, "{}", x),
+        }
+    }
+}
+
 impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
     /// The granule's size.
     pub const SIZE: usize = Self::size_checked();
@@ -101,22 +129,18 @@ impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
     }
 }
 
-impl<const AS_SIZE: usize> AddressSpaceSize<AS_SIZE> {
+impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
     /// The address space size.
     pub const SIZE: usize = Self::size_checked();
 
     /// The address space shift, aka log2(size).
-    pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
+    pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;
 
     const fn size_checked() -> usize {
         assert!(AS_SIZE.is_power_of_two());
-        assert!(arch_mmu::MIN_ADDR_SPACE_SIZE.is_power_of_two());
-        assert!(arch_mmu::MAX_ADDR_SPACE_SIZE.is_power_of_two());
-
-        // Must adhere to architectural restrictions.
-        assert!(AS_SIZE >= arch_mmu::MIN_ADDR_SPACE_SIZE);
-        assert!(AS_SIZE <= arch_mmu::MAX_ADDR_SPACE_SIZE);
-        assert!((AS_SIZE % arch_mmu::AddrSpaceSizeGranule::SIZE) == 0);
+
+        // Check for architectural restrictions as well.
+        Self::arch_address_space_size_sanity_checks();
 
         AS_SIZE
     }
@@ -136,7 +160,7 @@ pub unsafe fn kernel_map_pages_at(
     phys_pages: &PageSliceDescriptor<Physical>,
     attr: &AttributeFields,
 ) -> Result<(), &'static str> {
-    let is_mmio = arch_mmu::kernel_translation_tables()
+    let is_mmio = bsp::memory::mmu::kernel_translation_tables()
         .read(|tables| tables.is_virt_page_slice_mmio(virt_pages));
     if is_mmio {
         return Err("Attempt to manually map into MMIO region");
@@ -169,8 +193,9 @@ pub unsafe fn kernel_map_mmio(
         addr
     // Otherwise, allocate a new virtual page slice and map it.
     } else {
-        let virt_pages: PageSliceDescriptor<Virtual> = arch_mmu::kernel_translation_tables()
-            .write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
+        let virt_pages: PageSliceDescriptor<Virtual> =
+            bsp::memory::mmu::kernel_translation_tables()
+                .write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;
 
         kernel_map_pages_at_unchecked(
             name,
@@ -189,19 +214,32 @@ pub unsafe fn kernel_map_mmio(
     Ok(virt_addr + offset_into_start_page)
 }
 
-/// Map the kernel's binary and enable the MMU.
+/// Map the kernel's binary. Returns the translation table's base address.
 ///
 /// # Safety
 ///
-/// - Crucial function during kernel init. Changes the the complete memory view of the processor.
-pub unsafe fn kernel_map_binary_and_enable_mmu() -> Result<(), &'static str> {
-    let phys_base_addr = arch_mmu::kernel_translation_tables().write(|tables| {
-        tables.init();
-        tables.phys_base_address()
-    });
+/// - See [`bsp::memory::mmu::kernel_map_binary()`].
+pub unsafe fn kernel_map_binary() -> Result<Address<Physical>, &'static str> {
+    let phys_kernel_tables_base_addr =
+        bsp::memory::mmu::kernel_translation_tables().write(|tables| {
+            tables.init();
+            tables.phys_base_address()
+        });
 
     bsp::memory::mmu::kernel_map_binary()?;
-    arch_mmu::mmu().enable(phys_base_addr)
+
+    Ok(phys_kernel_tables_base_addr)
+}
+
+/// Enable the MMU and data + instruction caching.
+///
+/// # Safety
+///
+/// - Crucial function during kernel init. Changes the complete memory view of the processor.
+pub unsafe fn enable_mmu_and_caching(
+    phys_tables_base_addr: Address<Physical>,
+) -> Result<(), MMUEnableError> {
+    arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
 }
 
 /// Human-readable print of all recorded kernel mappings.
diff --git a/15_virtual_mem_part2_mmio_remap/src/memory/mmu/mapping_record.rs b/15_virtual_mem_part2_mmio_remap/src/memory/mmu/mapping_record.rs
index cd46403f..791ec491 100644
--- a/15_virtual_mem_part2_mmio_remap/src/memory/mmu/mapping_record.rs
+++ b/15_virtual_mem_part2_mmio_remap/src/memory/mmu/mapping_record.rs
@@ -111,12 +111,12 @@ impl MappingRecord {
         const KIB_RSHIFT: u32 = 10; // log2(1024).
         const MIB_RSHIFT: u32 = 20; // log2(1024 * 1024).
 
-        info!("      -----------------------------------------------------------------------------------------------------------------");
+        info!("      -------------------------------------------------------------------------------------------------------------------------------------------");
         info!(
-            "      {:^24}     {:^24} {:^7} {:^9} {:^35}",
+            "      {:^44}     {:^30} {:^7} {:^9} {:^35}",
             "Virtual", "Physical", "Size", "Attr", "Entity"
         );
-        info!("      -----------------------------------------------------------------------------------------------------------------");
+        info!("      -------------------------------------------------------------------------------------------------------------------------------------------");
 
         for i in self
             .inner
@@ -124,10 +124,10 @@ impl MappingRecord {
             .filter(|x| x.is_some())
             .map(|x| x.unwrap())
         {
-            let virt_start = i.virt_start_addr.into_usize();
+            let virt_start = i.virt_start_addr;
             let virt_end_inclusive = virt_start + i.phys_pages.size() - 1;
-            let phys_start = i.phys_pages.start_addr().into_usize();
-            let phys_end_inclusive = i.phys_pages.end_addr_inclusive().into_usize();
+            let phys_start = i.phys_pages.start_addr();
+            let phys_end_inclusive = i.phys_pages.end_addr_inclusive();
             let size = i.phys_pages.size();
 
             let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
@@ -155,7 +155,7 @@ impl MappingRecord {
             };
 
             info!(
-                "      {:#011X}..{:#011X} --> {:#011X}..{:#011X} | \
+                "      {}..{} --> {}..{} | \
                 {: >3} {} | {: <3} {} {: <2} | {}",
                 virt_start,
                 virt_end_inclusive,
@@ -172,14 +172,14 @@ impl MappingRecord {
 
             for k in i.users[1..].iter() {
                 if let Some(additional_user) = *k {
                     info!(
-                        "                                                            | {}",
+                        "                                                                                      | {}",
                         additional_user
                     );
                 }
             }
         }
 
-        info!("      -----------------------------------------------------------------------------------------------------------------");
+        info!("      -------------------------------------------------------------------------------------------------------------------------------------------");
     }
 }
diff --git a/15_virtual_mem_part2_mmio_remap/src/memory/mmu/translation_table.rs b/15_virtual_mem_part2_mmio_remap/src/memory/mmu/translation_table.rs
index 7e16e606..65af83cb 100644
--- a/15_virtual_mem_part2_mmio_remap/src/memory/mmu/translation_table.rs
+++ b/15_virtual_mem_part2_mmio_remap/src/memory/mmu/translation_table.rs
@@ -16,7 +16,8 @@ use crate::memory::{
 //--------------------------------------------------------------------------------------------------
 // Architectural Public Reexports
 //--------------------------------------------------------------------------------------------------
-pub use arch_translation_table::KernelTranslationTable;
+#[cfg(target_arch = "aarch64")]
+pub use arch_translation_table::FixedSizeTranslationTable;
 
 //--------------------------------------------------------------------------------------------------
 // Public Definitions
@@ -34,7 +35,7 @@ pub mod interface {
         ///
         /// - Implementor must ensure that this function can run only once or is harmless if invoked
         ///   multiple times.
-        unsafe fn init(&mut self);
+        fn init(&mut self);
 
         /// The translation table's base address to be used for programming the MMU.
         fn phys_base_address(&self) -> Address<Physical>;
@@ -80,17 +81,17 @@ pub mod interface {
 mod tests {
     use super::*;
     use crate::bsp;
-    use arch_translation_table::MinSizeKernelTranslationTable;
+    use arch_translation_table::MinSizeTranslationTable;
     use interface::TranslationTable;
     use test_macros::kernel_test;
 
-    /// Sanity checks for the kernel TranslationTable implementation.
+    /// Sanity checks for the TranslationTable implementation.
     #[kernel_test]
     fn translationtable_implementation_sanity() {
-        // Need to take care that `tables` fits into the stack.
-        let mut tables = MinSizeKernelTranslationTable::new();
+        // This will occupy a lot of space on the stack.
+        let mut tables = MinSizeTranslationTable::new();
 
-        unsafe { tables.init() };
+        tables.init();
 
         let x = tables.next_mmio_virt_page_slice(0);
         assert!(x.is_err());
diff --git a/15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs b/15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs
index 59431588..f83ebb89 100644
--- a/15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs
+++ b/15_virtual_mem_part2_mmio_remap/src/memory/mmu/types.rs
@@ -8,7 +8,7 @@ use crate::{
     bsp, common,
     memory::{Address, AddressType, Physical, Virtual},
 };
-use core::{convert::From, marker::PhantomData, ops::RangeInclusive};
+use core::{convert::From, marker::PhantomData};
 
 //--------------------------------------------------------------------------------------------------
 // Public Definitions
@@ -121,6 +121,11 @@ impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
         self.start + (self.size() - 1)
     }
 
+    /// Check if an address is contained within this descriptor.
+    pub fn contains(&self, addr: Address<ATYPE>) -> bool {
+        (addr >= self.start_addr()) && (addr <= self.end_addr_inclusive())
+    }
+
     /// Return a non-mutable slice of Pages.
     ///
     /// # Safety
@@ -129,14 +134,6 @@ impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
     pub unsafe fn as_slice(&self) -> &[Page<ATYPE>] {
         core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages)
     }
-
-    /// Return the inclusive address range of the slice.
-    pub fn into_usize_range_inclusive(self) -> RangeInclusive<usize> {
-        RangeInclusive::new(
-            self.start_addr().into_usize(),
-            self.end_addr_inclusive().into_usize(),
-        )
-    }
 }
 
 impl From<PageSliceDescriptor<Virtual>> for PageSliceDescriptor<Physical> {
diff --git a/15_virtual_mem_part2_mmio_remap/src/synchronization.rs b/15_virtual_mem_part2_mmio_remap/src/synchronization.rs
index fe9d454a..94582732 100644
--- a/15_virtual_mem_part2_mmio_remap/src/synchronization.rs
+++ b/15_virtual_mem_part2_mmio_remap/src/synchronization.rs
@@ -139,3 +139,21 @@ impl<T> interface::ReadWriteEx for InitStateLock<T> {
         f(data)
     }
 }
+
+//--------------------------------------------------------------------------------------------------
+// Testing
+//--------------------------------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use test_macros::kernel_test;
+
+    /// InitStateLock must be transparent.
+    #[kernel_test]
+    fn init_state_lock_is_transparent() {
+        use core::mem::size_of;
+
+        assert_eq!(size_of::<InitStateLock<u64>>(), size_of::<u64>());
+    }
+}
diff --git a/15_virtual_mem_part2_mmio_remap/tests/02_exception_sync_page_fault.rs b/15_virtual_mem_part2_mmio_remap/tests/02_exception_sync_page_fault.rs
index 45f12a1f..940866a0 100644
--- a/15_virtual_mem_part2_mmio_remap/tests/02_exception_sync_page_fault.rs
+++ b/15_virtual_mem_part2_mmio_remap/tests/02_exception_sync_page_fault.rs
@@ -29,11 +29,19 @@ unsafe fn kernel_init() -> ! {
     println!("Testing synchronous exception handling by causing a page fault");
     println!("-------------------------------------------------------------------\n");
 
-    if let Err(string) = memory::mmu::kernel_map_binary_and_enable_mmu() {
-        println!("Enabling MMU failed: {}", string);
+    let phys_kernel_tables_base_addr = match memory::mmu::kernel_map_binary() {
+        Err(string) => {
+            println!("Error mapping kernel binary: {}", string);
+            cpu::qemu_exit_failure()
+        }
+        Ok(addr) => addr,
+    };
+
+    if let Err(e) = memory::mmu::enable_mmu_and_caching(phys_kernel_tables_base_addr) {
+        println!("Enabling MMU failed: {}", e);
         cpu::qemu_exit_failure()
     }
 
-    // Printing will silently fail fail from here on, because the driver's MMIO is not remapped yet.
+    // Printing will silently fail from here on, because the driver's MMIO is not remapped yet.
     // Bring up the drivers needed for printing first.
     for i in bsp::driver::driver_manager()