Add tutorial 16

pull/112/head
Andre Richter 3 years ago
parent 7fee5f7114
commit b4e3ebc606
No known key found for this signature in database
GPG Key ID: 2116C1AB102F615E

@ -16,6 +16,9 @@ Layout/IndentationWidth:
Layout/LineLength:
Max: 100
Lint/DeprecatedConstants:
Enabled: false
Metrics/ClassLength:
Enabled: false

@ -0,0 +1,2 @@
# For bare-metal builds (any target with `target_os = "none"`), run test binaries
# through the wrapper script instead of executing them on the host directly.
# NOTE(review): the script is generated into `target/` by the project's Makefile
# `test` target (it writes and chmods `target/kernel_test_runner.sh`) — it does
# not exist in a fresh checkout until `make test` has run.
[target.'cfg(target_os = "none")']
runner = "target/kernel_test_runner.sh"

@ -0,0 +1,7 @@
{
"editor.formatOnSave": true,
"editor.rulers": [100],
"rust-analyzer.checkOnSave.overrideCommand": ["make", "check"],
"rust-analyzer.cargo.target": "aarch64-unknown-none-softfloat",
"rust-analyzer.cargo.features": ["bsp_rpi3"]
}

@ -0,0 +1,93 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "cortex-a"
version = "5.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ecefc30975eb87afc5a810d4b2305c0ec29e607ea97e51b2ecd80766e9268d28"
dependencies = [
"register",
]
[[package]]
name = "kernel"
version = "0.1.0"
dependencies = [
"cortex-a",
"qemu-exit",
"register",
"test-macros",
"test-types",
]
[[package]]
name = "proc-macro2"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
dependencies = [
"unicode-xid",
]
[[package]]
name = "qemu-exit"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "702abe3c3255be20a4d67bda326c4117081a49a57afaf752de4845091bc6b476"
[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "register"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4a247de29ab7cc8f5006cfe775c4a81c704f9914c5e2a79696862e643135433"
dependencies = [
"tock-registers",
]
[[package]]
name = "syn"
version = "1.0.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "123a78a3596b24fee53a6464ce52d8ecbf62241e6294c7e7fe12086cd161f512"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "test-macros"
version = "0.1.0"
dependencies = [
"proc-macro2",
"quote",
"syn",
"test-types",
]
[[package]]
name = "test-types"
version = "0.1.0"
[[package]]
name = "tock-registers"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f521a79accce68c417c9c77ce22108056b626126da1932f7e2e9b5bbffee0cea"
[[package]]
name = "unicode-xid"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"

@ -0,0 +1,55 @@
[package]
name = "kernel"
version = "0.1.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2018"
[profile.release]
lto = true
[features]
default = []
bsp_rpi3 = ["register"]
bsp_rpi4 = ["register"]
test_build = ["qemu-exit"]
##--------------------------------------------------------------------------------------------------
## Dependencies
##--------------------------------------------------------------------------------------------------
[dependencies]
test-types = { path = "test-types" }
# Optional dependencies
register = { version = "1.x.x", features = ["no_std_unit_tests"], optional = true }
qemu-exit = { version = "1.x.x", optional = true }
# Platform specific dependencies
[target.'cfg(target_arch = "aarch64")'.dependencies]
cortex-a = { version = "5.x.x" }
##--------------------------------------------------------------------------------------------------
## Testing
##--------------------------------------------------------------------------------------------------
[dev-dependencies]
test-macros = { path = "test-macros" }
# Unit tests are done in the library part of the kernel.
[lib]
name = "libkernel"
test = true
# Disable unit tests for the kernel binary.
[[bin]]
name = "kernel"
test = false
# List of tests without harness.
[[test]]
name = "00_console_sanity"
harness = false
[[test]]
name = "02_exception_sync_page_fault"
harness = false

@ -0,0 +1,193 @@
## SPDX-License-Identifier: MIT OR Apache-2.0
##
## Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
include ../utils/color.mk.in
# Default to the RPi3
BSP ?= rpi3
# Default to a serial device name that is common in Linux.
DEV_SERIAL ?= /dev/ttyUSB0
# Query the host system's kernel name
UNAME_S = $(shell uname -s)
# BSP-specific arguments
ifeq ($(BSP),rpi3)
TARGET = aarch64-unknown-none-softfloat
KERNEL_BIN = kernel8.img
QEMU_BINARY = qemu-system-aarch64
QEMU_MACHINE_TYPE = raspi3
QEMU_RELEASE_ARGS = -serial stdio -display none
QEMU_TEST_ARGS = $(QEMU_RELEASE_ARGS) -semihosting
OBJDUMP_BINARY = aarch64-none-elf-objdump
NM_BINARY = aarch64-none-elf-nm
READELF_BINARY = aarch64-none-elf-readelf
OPENOCD_ARG = -f /openocd/tcl/interface/ftdi/olimex-arm-usb-tiny-h.cfg -f /openocd/rpi3.cfg
JTAG_BOOT_IMAGE = ../X1_JTAG_boot/jtag_boot_rpi3.img
LINKER_FILE = src/bsp/raspberrypi/link.ld
RUSTC_MISC_ARGS = -C target-cpu=cortex-a53
else ifeq ($(BSP),rpi4)
TARGET = aarch64-unknown-none-softfloat
KERNEL_BIN = kernel8.img
QEMU_BINARY = qemu-system-aarch64
QEMU_MACHINE_TYPE =
QEMU_RELEASE_ARGS = -serial stdio -display none
QEMU_TEST_ARGS = $(QEMU_RELEASE_ARGS) -semihosting
OBJDUMP_BINARY = aarch64-none-elf-objdump
NM_BINARY = aarch64-none-elf-nm
READELF_BINARY = aarch64-none-elf-readelf
OPENOCD_ARG = -f /openocd/tcl/interface/ftdi/olimex-arm-usb-tiny-h.cfg -f /openocd/rpi4.cfg
JTAG_BOOT_IMAGE = ../X1_JTAG_boot/jtag_boot_rpi4.img
LINKER_FILE = src/bsp/raspberrypi/link.ld
RUSTC_MISC_ARGS = -C target-cpu=cortex-a72
endif
# Export for build.rs
export LINKER_FILE
# Testing-specific arguments
ifdef TEST
ifeq ($(TEST),unit)
TEST_ARG = --lib
else
TEST_ARG = --test $(TEST)
endif
endif
QEMU_MISSING_STRING = "This board is not yet supported for QEMU."
RUSTFLAGS = -C link-arg=-T$(LINKER_FILE) $(RUSTC_MISC_ARGS)
RUSTFLAGS_PEDANTIC = $(RUSTFLAGS) -D warnings -D missing_docs
FEATURES = --features bsp_$(BSP)
COMPILER_ARGS = --target=$(TARGET) \
$(FEATURES) \
--release
RUSTC_CMD = cargo rustc $(COMPILER_ARGS)
DOC_CMD = cargo doc $(COMPILER_ARGS)
CLIPPY_CMD = cargo clippy $(COMPILER_ARGS)
CHECK_CMD = cargo check $(COMPILER_ARGS)
TEST_CMD = cargo test $(COMPILER_ARGS)
OBJCOPY_CMD = rust-objcopy \
--strip-all \
-O binary
KERNEL_ELF = target/$(TARGET)/release/kernel
DOCKER_IMAGE = rustembedded/osdev-utils
DOCKER_CMD = docker run --rm -v $(shell pwd):/work/tutorial -w /work/tutorial
DOCKER_CMD_INTERACT = $(DOCKER_CMD) -i -t
DOCKER_ARG_DIR_UTILS = -v $(shell pwd)/../utils:/work/utils
DOCKER_ARG_DIR_JTAG = -v $(shell pwd)/../X1_JTAG_boot:/work/X1_JTAG_boot
DOCKER_ARG_DEV = --privileged -v /dev:/dev
DOCKER_ARG_NET = --network host
DOCKER_QEMU = $(DOCKER_CMD_INTERACT) $(DOCKER_IMAGE)
DOCKER_GDB = $(DOCKER_CMD_INTERACT) $(DOCKER_ARG_NET) $(DOCKER_IMAGE)
DOCKER_TEST = $(DOCKER_CMD) $(DOCKER_IMAGE)
DOCKER_TOOLS = $(DOCKER_CMD) $(DOCKER_IMAGE)
# Dockerize commands that require USB device passthrough only on Linux
ifeq ($(UNAME_S),Linux)
DOCKER_CMD_DEV = $(DOCKER_CMD_INTERACT) $(DOCKER_ARG_DEV)
DOCKER_CHAINBOOT = $(DOCKER_CMD_DEV) $(DOCKER_ARG_DIR_UTILS) $(DOCKER_IMAGE)
DOCKER_JTAGBOOT = $(DOCKER_CMD_DEV) $(DOCKER_ARG_DIR_UTILS) $(DOCKER_ARG_DIR_JTAG) $(DOCKER_IMAGE)
DOCKER_OPENOCD = $(DOCKER_CMD_DEV) $(DOCKER_ARG_NET) $(DOCKER_IMAGE)
else
DOCKER_OPENOCD = echo "Not yet supported on non-Linux systems."; \#
endif
EXEC_QEMU = $(QEMU_BINARY) -M $(QEMU_MACHINE_TYPE)
EXEC_MINIPUSH = ruby ../utils/minipush.rb
.PHONY: all $(KERNEL_ELF) $(KERNEL_BIN) doc qemu test chainboot jtagboot openocd gdb gdb-opt0 \
clippy clean readelf objdump nm check
all: $(KERNEL_BIN)
$(KERNEL_ELF):
$(call colorecho, "\nCompiling kernel - $(BSP)")
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(RUSTC_CMD)
@$(DOCKER_TOOLS) ruby translation_table_tool/main.rb $(TARGET) $(BSP) $(KERNEL_ELF)
$(KERNEL_BIN): $(KERNEL_ELF)
@$(OBJCOPY_CMD) $(KERNEL_ELF) $(KERNEL_BIN)
doc:
$(call colorecho, "\nGenerating docs")
@$(DOC_CMD) --document-private-items --open
ifeq ($(QEMU_MACHINE_TYPE),)
qemu test:
$(call colorecho, "\n$(QEMU_MISSING_STRING)")
else
qemu: $(KERNEL_BIN)
$(call colorecho, "\nLaunching QEMU")
@$(DOCKER_QEMU) $(EXEC_QEMU) $(QEMU_RELEASE_ARGS) -kernel $(KERNEL_BIN)
define KERNEL_TEST_RUNNER
#!/usr/bin/env bash
TEST_ELF=$$(echo $$1 | sed -e 's/.*target/target/g')
TEST_BINARY=$$(echo $$1.img | sed -e 's/.*target/target/g')
$(DOCKER_TOOLS) ruby translation_table_tool/main.rb $(TARGET) $(BSP) $$TEST_ELF > /dev/null
$(OBJCOPY_CMD) $$TEST_ELF $$TEST_BINARY
$(DOCKER_TEST) ruby tests/runner.rb $(EXEC_QEMU) $(QEMU_TEST_ARGS) -kernel $$TEST_BINARY
endef
export KERNEL_TEST_RUNNER
test: FEATURES += --features test_build
test:
$(call colorecho, "\nCompiling test(s) - $(BSP)")
@mkdir -p target
@echo "$$KERNEL_TEST_RUNNER" > target/kernel_test_runner.sh
@chmod +x target/kernel_test_runner.sh
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(TEST_CMD) $(TEST_ARG)
endif
chainboot: $(KERNEL_BIN)
@$(DOCKER_CHAINBOOT) $(EXEC_MINIPUSH) $(DEV_SERIAL) $(KERNEL_BIN)
jtagboot:
@$(DOCKER_JTAGBOOT) $(EXEC_MINIPUSH) $(DEV_SERIAL) $(JTAG_BOOT_IMAGE)
openocd:
$(call colorecho, "\nLaunching OpenOCD")
@$(DOCKER_OPENOCD) openocd $(OPENOCD_ARG)
gdb: RUSTC_MISC_ARGS += -C debuginfo=2
gdb-opt0: RUSTC_MISC_ARGS += -C debuginfo=2 -C opt-level=0
gdb gdb-opt0: $(KERNEL_ELF)
$(call colorecho, "\nLaunching GDB")
@$(DOCKER_GDB) gdb-multiarch -q $(KERNEL_ELF)
clippy:
@RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(CLIPPY_CMD)
clean:
rm -rf target $(KERNEL_BIN)
readelf: $(KERNEL_ELF)
$(call colorecho, "\nLaunching readelf")
@$(DOCKER_TOOLS) $(READELF_BINARY) --headers $(KERNEL_ELF)
objdump: $(KERNEL_ELF)
$(call colorecho, "\nLaunching objdump")
@$(DOCKER_TOOLS) $(OBJDUMP_BINARY) --disassemble --demangle \
--section .text \
--section .rodata \
--section .got \
$(KERNEL_ELF) | rustfilt
nm: $(KERNEL_ELF)
$(call colorecho, "\nLaunching nm")
@$(DOCKER_TOOLS) $(NM_BINARY) --demangle --print-size $(KERNEL_ELF) | sort | rustfilt
# For rust-analyzer
check:
@RUSTFLAGS="$(RUSTFLAGS)" $(CHECK_CMD) --message-format=json

@ -0,0 +1,634 @@
# Tutorial 16 - Virtual Memory Part 4: Higher-Half Kernel
## tl;dr
- The time has come: We map and run the kernel from the top of the 64 bit virtual address space! 🥳
## Table of Contents
- [Introduction](#introduction)
- [Implementation](#implementation)
- [Position-Independent Boot Code](#position-independent-boot-code)
- [Test it](#test-it)
- [Diff to previous](#diff-to-previous)
## Introduction
A long time in the making, in this tutorial we finally map the kernel to the most significant area
(alternatively: higher-half) of the 64 bit virtual address space. This makes room for future
applications to use the whole of the least significant area of the virtual memory space.
As has been teased since `tutorial 14`, we will make use of the `AArch64`'s `TTBR1`. Since the
kernel's virtual address space size has been `2 GiB` since the last tutorial, `TTBR1` will cover the range
from `0xffff_ffff_ffff_ffff` down to `0xffff_ffff_8000_0000` (both inclusive).
## Implementation
In `src/memory/mmu.rs`, we extend the `AssociatedTranslationTable` trait with a `TableStartFromTop`
associated type:
```rust
/// Intended to be implemented for [`AddressSpace`].
pub trait AssociatedTranslationTable {
/// A translation table whose address range is:
///
/// [u64::MAX, (u64::MAX - AS_SIZE) + 1]
type TableStartFromTop;
/// A translation table whose address range is:
///
/// [0, AS_SIZE - 1]
type TableStartFromBottom;
}
```
Architecture specific code in `_arch/aarch64/memory/mmu/translation_table.rs` populates both types
now by making use of a new generic that is added to `FixedSizeTranslationTable`, which defines
whether the covered address space starts from the top or the bottom:
```rust
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize, const START_FROM_TOP: bool> {
...
```
```rust
impl<const AS_SIZE: usize> memory::mmu::AssociatedTranslationTable
for memory::mmu::AddressSpace<AS_SIZE>
where
[u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
type TableStartFromTop =
FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, true>;
type TableStartFromBottom =
FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, false>;
}
```
Thanks to this infrastructure, `BSP` Rust code in `bsp/raspberrypi/memory/mmu.rs` only needs to
change to this newly introduced type in order to switch from lower half to higher half translation
tables for the kernel:
```rust
type KernelTranslationTable =
<KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromTop;
```
In the `link.ld` linker script, we define a new symbol `__kernel_virt_start_addr` now, which is the
start address of the kernel's virtual address space, calculated as `(u64::MAX -
__kernel_virt_addr_space_size) + 1`. In order to make virtual-to-physical address translation easier
for the human eye (and mind), we link the kernel itself at `__kernel_virt_start_addr +
__rpi_load_addr`.
Before these tutorials, the first mapped address of the kernel binary was always located at
`__rpi_load_addr == 0x8_0000`. Starting with this tutorial, due to the `2 GiB` virtual address space
size, the new first mapped address is `0xffff_ffff_8008_0000`. So by ignoring the upper bits of the
address, you can easily derive the physical address.
The changes in the `_arch` `MMU` driver are minimal, and mostly concerned with configuring `TCR_EL1`
for use with `TTBR1_EL1` now. And of course, setting `TTBR1_EL1` in `fn
enable_mmu_and_caching(...)`.
### Position-Independent Boot Code
Remember all the fuss that we made about `position-independent code` that will be needed until the
`MMU` is enabled? Let's quickly check what it means for us in reality now:
In `_arch/aarch64/cpu/boot.rs`, we turn on the `MMU` just before returning from `EL2` to `EL1`. So
by the time the CPU enters `EL1`, virtual memory will be active, and the CPU must therefore use the
new higher-half `virtual addresses` for everything it does.
Specifically, this means the address from which the CPU should execute upon entering `EL1` (function
`runtime_init()`) must be a valid _virtual address_, same as the stack pointer's address. Both of
them are programmed in function `fn prepare_el2_to_el1_transition(...)`, so we must ensure now that
_link-time_ addresses are used here. For this reason, retrieval of these addresses happens in
`assembly` in `boot.s`, where we can explicitly enforce generation of **absolute** addresses:
```asm
// Load the _absolute_ addresses of the following symbols. Since the kernel is linked at
// the top of the 64 bit address space, these are effectively virtual addresses.
ADR_ABS x1, __boot_core_stack_end_exclusive
ADR_ABS x2, runtime_init
```
Both values are forwarded to the Rust entry point function `_start_rust()`, which in turn forwards
them to `fn prepare_el2_to_el1_transition(...)`.
One more thing to consider is that we keep on programming the boot core's stack address for `EL2`
using an address that is calculated `PC-relative`, because all the `EL2` code will still run while
virtual memory _is disabled_. As such, we need the "physical" address of the stack, so to speak.
The previous tutorial also explained that it is not easily possible to compile select files using
`-fpic` in `Rust`. Still, we are doing some function calls in `Rust` before virtual memory is
enabled, so _theoretically_, there is room for failure. However, branches to local code in `AArch64`
are usually generated PC-relative. So it is a small risk worth taking. Should it still fail someday,
at least our automated CI pipeline would give notice when the tests start to fail.
## Test it
That's it! We are ready for a higher-half kernel now. Power up your Raspberrys and marvel at those
beautiful (virtual) addresses:
Raspberry Pi 3:
```console
$ make chainboot
[...]
Precomputing kernel translation tables and patching kernel ELF
--------------------------------------------------
Section Start Virt Addr Size
--------------------------------------------------
Generating Code and RO data | 0xffff_ffff_8008_0000 | 64 KiB
Generating Data and bss | 0xffff_ffff_8009_0000 | 384 KiB
Generating Boot-core stack | 0xffff_ffff_8010_0000 | 512 KiB
--------------------------------------------------
Patching Kernel table struct at physical 0x9_0000
Patching Value of kernel table physical base address (0xd_0000) at physical 0x8_0060
Finished in 0.03s
Minipush 1.0
[MP] ⏳ Waiting for /dev/ttyUSB0
[MP] ✅ Serial connected
[MP] 🔌 Please power the target now
__ __ _ _ _ _
| \/ (_)_ _ (_) | ___ __ _ __| |
| |\/| | | ' \| | |__/ _ \/ _` / _` |
|_| |_|_|_||_|_|____\___/\__,_\__,_|
Raspberry Pi 3
[ML] Requesting binary
[MP] ⏩ Pushing 387 KiB =======================================🦀 100% 96 KiB/s Time: 00:00:04
[ML] Loaded! Executing the payload now
[ 4.320829] Booting on: Raspberry Pi 3
[ 4.321102] MMU online:
[ 4.321394] -------------------------------------------------------------------------------------------------------------------------------------------
[ 4.323138] Virtual Physical Size Attr Entity
[ 4.324882] -------------------------------------------------------------------------------------------------------------------------------------------
[ 4.326629] 0xffff_ffff_8008_0000..0xffff_ffff_8008_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data
[ 4.328243] 0xffff_ffff_8009_0000..0xffff_ffff_800e_ffff --> 0x00_0009_0000..0x00_000e_ffff | 384 KiB | C RW XN | Kernel data and bss
[ 4.329813] 0xffff_ffff_8010_0000..0xffff_ffff_8017_ffff --> 0x00_0010_0000..0x00_0017_ffff | 512 KiB | C RW XN | Kernel boot-core stack
[ 4.331416] 0xffff_ffff_f000_0000..0xffff_ffff_f000_ffff --> 0x00_3f20_0000..0x00_3f20_ffff | 64 KiB | Dev RW XN | BCM GPIO
[ 4.332867] | BCM PL011 UART
[ 4.334385] 0xffff_ffff_f001_0000..0xffff_ffff_f001_ffff --> 0x00_3f00_0000..0x00_3f00_ffff | 64 KiB | Dev RW XN | BCM Peripheral Interrupt Controller
[ 4.336128] -------------------------------------------------------------------------------------------------------------------------------------------
```
Raspberry Pi 4:
```console
$ BSP=rpi4 make chainboot
[...]
Precomputing kernel translation tables and patching kernel ELF
--------------------------------------------------
Section Start Virt Addr Size
--------------------------------------------------
Generating Code and RO data | 0xffff_ffff_8008_0000 | 64 KiB
Generating Data and bss | 0xffff_ffff_8009_0000 | 448 KiB
Generating Boot-core stack | 0xffff_ffff_8011_0000 | 512 KiB
--------------------------------------------------
Patching Kernel table struct at physical 0xa_0000
Patching Value of kernel table physical base address (0xe_0000) at physical 0x8_0068
Finished in 0.03s
Minipush 1.0
[MP] ⏳ Waiting for /dev/ttyUSB0
[MP] ✅ Serial connected
[MP] 🔌 Please power the target now
__ __ _ _ _ _
| \/ (_)_ _ (_) | ___ __ _ __| |
| |\/| | | ' \| | |__/ _ \/ _` / _` |
|_| |_|_|_||_|_|____\___/\__,_\__,_|
Raspberry Pi 4
[ML] Requesting binary
[MP] ⏩ Pushing 449 KiB ======================================🦀 100% 112 KiB/s Time: 00:00:04
[ML] Loaded! Executing the payload now
[ 5.011490] Booting on: Raspberry Pi 4
[ 5.011588] MMU online:
[ 5.011881] -------------------------------------------------------------------------------------------------------------------------------------------
[ 5.013625] Virtual Physical Size Attr Entity
[ 5.015369] -------------------------------------------------------------------------------------------------------------------------------------------
[ 5.017115] 0xffff_ffff_8008_0000..0xffff_ffff_8008_ffff --> 0x00_0008_0000..0x00_0008_ffff | 64 KiB | C RO X | Kernel code and RO data
[ 5.018730] 0xffff_ffff_8009_0000..0xffff_ffff_800f_ffff --> 0x00_0009_0000..0x00_000f_ffff | 448 KiB | C RW XN | Kernel data and bss
[ 5.020299] 0xffff_ffff_8011_0000..0xffff_ffff_8018_ffff --> 0x00_0011_0000..0x00_0018_ffff | 512 KiB | C RW XN | Kernel boot-core stack
[ 5.021903] 0xffff_ffff_f000_0000..0xffff_ffff_f000_ffff --> 0x00_fe20_0000..0x00_fe20_ffff | 64 KiB | Dev RW XN | BCM GPIO
[ 5.023354] | BCM PL011 UART
[ 5.024871] 0xffff_ffff_f001_0000..0xffff_ffff_f001_ffff --> 0x00_ff84_0000..0x00_ff84_ffff | 64 KiB | Dev RW XN | GICD
[ 5.026279] | GICC
[ 5.027687] -------------------------------------------------------------------------------------------------------------------------------------------
```
## Diff to previous
```diff
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/cpu/boot.rs
--- 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/cpu/boot.rs
@@ -11,7 +11,7 @@
//!
//! crate::cpu::boot::arch_boot
-use crate::{cpu, memory, memory::Address, runtime_init};
+use crate::{cpu, memory, memory::Address};
use core::intrinsics::unlikely;
use cortex_a::{asm, regs::*};
@@ -29,7 +29,10 @@
/// - The `bss` section is not initialized yet. The code must not use or reference it in any way.
/// - The HW state of EL1 must be prepared in a sound way.
#[inline(always)]
-unsafe fn prepare_el2_to_el1_transition(phys_boot_core_stack_end_exclusive_addr: u64) {
+unsafe fn prepare_el2_to_el1_transition(
+ virt_boot_core_stack_end_exclusive_addr: u64,
+ virt_runtime_init_addr: u64,
+) {
// Enable timer counter registers for EL1.
CNTHCTL_EL2.write(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);
@@ -52,11 +55,11 @@
);
// Second, let the link register point to runtime_init().
- ELR_EL2.set(runtime_init::runtime_init as *const () as u64);
+ ELR_EL2.set(virt_runtime_init_addr);
// Set up SP_EL1 (stack pointer), which will be used by EL1 once we "return" to it. Since there
// are no plans to ever return to EL2, just re-use the same stack.
- SP_EL1.set(phys_boot_core_stack_end_exclusive_addr);
+ SP_EL1.set(virt_boot_core_stack_end_exclusive_addr);
}
//--------------------------------------------------------------------------------------------------
@@ -74,9 +77,13 @@
#[no_mangle]
pub unsafe extern "C" fn _start_rust(
phys_kernel_tables_base_addr: u64,
- phys_boot_core_stack_end_exclusive_addr: u64,
+ virt_boot_core_stack_end_exclusive_addr: u64,
+ virt_runtime_init_addr: u64,
) -> ! {
- prepare_el2_to_el1_transition(phys_boot_core_stack_end_exclusive_addr);
+ prepare_el2_to_el1_transition(
+ virt_boot_core_stack_end_exclusive_addr,
+ virt_runtime_init_addr,
+ );
// Turn on the MMU for EL1.
let addr = Address::new(phys_kernel_tables_base_addr as usize);
@@ -84,6 +91,7 @@
cpu::wait_forever();
}
- // Use `eret` to "return" to EL1. This results in execution of runtime_init() in EL1.
+ // Use `eret` to "return" to EL1. Since virtual memory will already be enabled, this results in
+ // execution of runtime_init() in EL1 from its _virtual address_.
asm::eret()
}
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.s 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/cpu/boot.s
--- 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/cpu/boot.s
+++ 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/cpu/boot.s
@@ -18,6 +18,18 @@
add \register, \register, #:lo12:\symbol
.endm
+// Load the address of a symbol into a register, absolute.
+//
+// # Resources
+//
+// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
+.macro ADR_ABS register, symbol
+ movz \register, #:abs_g3:\symbol
+ movk \register, #:abs_g2_nc:\symbol
+ movk \register, #:abs_g1_nc:\symbol
+ movk \register, #:abs_g0_nc:\symbol
+.endm
+
.equ _EL2, 0x8
.equ _core_id_mask, 0b11
@@ -47,11 +59,23 @@
// Load the base address of the kernel's translation tables.
ldr x0, PHYS_KERNEL_TABLES_BASE_ADDR // provided by bsp/__board_name__/memory/mmu.rs
- // Set the stack pointer. This ensures that any code in EL2 that needs the stack will work.
- ADR_REL x1, __boot_core_stack_end_exclusive
- mov sp, x1
+ // Load the _absolute_ addresses of the following symbols. Since the kernel is linked at
+ // the top of the 64 bit address space, these are effectively virtual addresses.
+ ADR_ABS x1, __boot_core_stack_end_exclusive
+ ADR_ABS x2, runtime_init
+
+ // Load the PC-relative address of the stack and set the stack pointer.
+ //
+ // Since _start() is the first function that runs after the firmware has loaded the kernel
+ // into memory, retrieving this symbol PC-relative returns the "physical" address.
+ //
+ // Setting the stack pointer to this value ensures that anything that still runs in EL2,
+ // until the kernel returns to EL1 with the MMU enabled, works as well. After the return to
+ // EL1, the virtual address of the stack retrieved above will be used.
+ ADR_REL x4, __boot_core_stack_end_exclusive
+ mov sp, x4
- // Jump to Rust code. x0 and x1 hold the function arguments provided to _start_rust().
+ // Jump to Rust code. x0, x1 and x2 hold the function arguments provided to _start_rust().
b _start_rust
// Infinitely wait for events (aka "park the core").
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/translation_table.rs 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/memory/mmu/translation_table.rs
--- 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu/translation_table.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/memory/mmu/translation_table.rs
@@ -131,7 +131,7 @@
/// aligned, so the lvl3 is put first.
#[repr(C)]
#[repr(align(65536))]
-pub struct FixedSizeTranslationTable<const NUM_TABLES: usize> {
+pub struct FixedSizeTranslationTable<const NUM_TABLES: usize, const START_FROM_TOP: bool> {
/// Page descriptors, covering 64 KiB windows per entry.
lvl3: [[PageDescriptor; 8192]; NUM_TABLES],
@@ -258,14 +258,23 @@
where
[u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
- type TableStartFromBottom = FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }>;
+ type TableStartFromTop =
+ FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, true>;
+
+ type TableStartFromBottom =
+ FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, false>;
}
-impl<const NUM_TABLES: usize> FixedSizeTranslationTable<NUM_TABLES> {
+impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
+ FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
+{
// Reserve the last 256 MiB of the address space for MMIO mappings.
const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
const L3_MMIO_START_INDEX: usize = 8192 / 2;
+ const START_FROM_TOP_OFFSET: Address<Virtual> =
+ Address::new((usize::MAX - (Granule512MiB::SIZE * NUM_TABLES)) + 1);
+
/// Create an instance.
#[allow(clippy::assertions_on_constants)]
const fn _new(for_precompute: bool) -> Self {
@@ -294,20 +303,32 @@
/// The start address of the table's MMIO range.
#[inline(always)]
fn mmio_start_addr(&self) -> Address<Virtual> {
- Address::new(
+ let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
- )
+ );
+
+ if START_FROM_TOP {
+ addr += Self::START_FROM_TOP_OFFSET;
+ }
+
+ addr
}
/// The inclusive end address of the table's MMIO range.
#[inline(always)]
fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
- Address::new(
+ let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (8191 << Granule64KiB::SHIFT)
| (Granule64KiB::SIZE - 1),
- )
+ );
+
+ if START_FROM_TOP {
+ addr += Self::START_FROM_TOP_OFFSET;
+ }
+
+ addr
}
/// Helper to calculate the lvl2 and lvl3 indices from an address.
@@ -316,7 +337,12 @@
&self,
addr: *const Page<Virtual>,
) -> Result<(usize, usize), &'static str> {
- let addr = addr as usize;
+ let mut addr = addr as usize;
+
+ if START_FROM_TOP {
+ addr -= Self::START_FROM_TOP_OFFSET.into_usize()
+ }
+
let lvl2_index = addr >> Granule512MiB::SHIFT;
let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;
@@ -343,8 +369,9 @@
// OS Interface Code
//------------------------------------------------------------------------------
-impl<const NUM_TABLES: usize> memory::mmu::translation_table::interface::TranslationTable
- for FixedSizeTranslationTable<NUM_TABLES>
+impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
+ memory::mmu::translation_table::interface::TranslationTable
+ for FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
{
fn init(&mut self) -> Result<(), &'static str> {
if self.initialized {
@@ -419,12 +446,16 @@
return Err("Not enough MMIO space left");
}
- let addr = Address::new(
+ let mut addr = Address::new(
(Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
| (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
);
self.cur_l3_mmio_index += num_pages;
+ if START_FROM_TOP {
+ addr += Self::START_FROM_TOP_OFFSET;
+ }
+
Ok(PageSliceDescriptor::from_addr(addr, num_pages))
}
@@ -447,7 +478,7 @@
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
-pub type MinSizeTranslationTable = FixedSizeTranslationTable<1>;
+pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, false>;
#[cfg(test)]
mod tests {
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu.rs 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/memory/mmu.rs
--- 15_virtual_mem_part3_precomputed_tables/src/_arch/aarch64/memory/mmu.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/_arch/aarch64/memory/mmu.rs
@@ -65,6 +65,7 @@
impl MemoryManagementUnit {
/// Setup function for the MAIR_EL1 register.
+ #[inline(always)]
fn set_up_mair(&self) {
// Define the memory types being mapped.
MAIR_EL1.write(
@@ -78,20 +79,21 @@
}
/// Configure various settings of stage 1 of the EL1 translation regime.
+ #[inline(always)]
fn configure_translation_control(&self) {
- let t0sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64;
+ let t1sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64;
TCR_EL1.write(
- TCR_EL1::TBI0::Used
+ TCR_EL1::TBI1::Used
+ TCR_EL1::IPS::Bits_40
- + TCR_EL1::TG0::KiB_64
- + TCR_EL1::SH0::Inner
- + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
- + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
- + TCR_EL1::EPD0::EnableTTBR0Walks
- + TCR_EL1::A1::TTBR0
- + TCR_EL1::T0SZ.val(t0sz)
- + TCR_EL1::EPD1::DisableTTBR1Walks,
+ + TCR_EL1::TG1::KiB_64
+ + TCR_EL1::SH1::Inner
+ + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ + TCR_EL1::EPD1::EnableTTBR1Walks
+ + TCR_EL1::A1::TTBR1
+ + TCR_EL1::T1SZ.val(t1sz)
+ + TCR_EL1::EPD0::DisableTTBR0Walks,
);
}
}
@@ -130,7 +132,7 @@
self.set_up_mair();
// Set the "Translation Table Base Register".
- TTBR0_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
+ TTBR1_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);
self.configure_translation_control();
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/link.ld 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/link.ld
--- 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/link.ld
+++ 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/link.ld
@@ -6,6 +6,15 @@
/* This file provides __kernel_virt_addr_space_size */
INCLUDE src/bsp/raspberrypi/kernel_virt_addr_space_size.ld;
+/* The kernel's virtual address range will be:
+ *
+ * [END_ADDRESS_INCLUSIVE, START_ADDRESS]
+ * [u64::MAX , (u64::MAX - __kernel_virt_addr_space_size) + 1]
+ *
+ * Since the start address is needed to set the linker address below, calculate it now.
+ */
+__kernel_virt_start_addr = ((0xffffffffffffffff - __kernel_virt_addr_space_size) + 1);
+
/* The address at which the the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
@@ -19,13 +28,14 @@
SECTIONS
{
- . = __rpi_load_addr;
+ /* Add the load address as an offset. Makes virt-to-phys translation easier for the human eye */
+ . = __kernel_virt_start_addr + __rpi_load_addr;
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
- .text :
+ .text : AT(__rpi_load_addr)
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory/mmu.rs 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/memory/mmu.rs
--- 15_virtual_mem_part3_precomputed_tables/src/bsp/raspberrypi/memory/mmu.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/bsp/raspberrypi/memory/mmu.rs
@@ -23,7 +23,7 @@
//--------------------------------------------------------------------------------------------------
type KernelTranslationTable =
- <KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromBottom;
+ <KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromTop;
//--------------------------------------------------------------------------------------------------
// Public Definitions
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/memory/mmu.rs 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu.rs
--- 15_virtual_mem_part3_precomputed_tables/src/memory/mmu.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/memory/mmu.rs
@@ -80,6 +80,11 @@
pub trait AssociatedTranslationTable {
/// A translation table whose address range is:
///
+ /// [u64::MAX, (u64::MAX - AS_SIZE) + 1]
+ type TableStartFromTop;
+
+ /// A translation table whose address range is:
+ ///
/// [0, AS_SIZE - 1]
type TableStartFromBottom;
}
diff -uNr 15_virtual_mem_part3_precomputed_tables/src/runtime_init.rs 16_virtual_mem_part4_higher_half_kernel/src/runtime_init.rs
--- 15_virtual_mem_part3_precomputed_tables/src/runtime_init.rs
+++ 16_virtual_mem_part4_higher_half_kernel/src/runtime_init.rs
@@ -30,6 +30,7 @@
/// # Safety
///
/// - Only a single core must be active and running this function.
+#[no_mangle]
pub unsafe fn runtime_init() -> ! {
extern "Rust" {
fn kernel_init() -> !;
diff -uNr 15_virtual_mem_part3_precomputed_tables/tests/02_exception_sync_page_fault.rs 16_virtual_mem_part4_higher_half_kernel/tests/02_exception_sync_page_fault.rs
--- 15_virtual_mem_part3_precomputed_tables/tests/02_exception_sync_page_fault.rs
+++ 16_virtual_mem_part4_higher_half_kernel/tests/02_exception_sync_page_fault.rs
@@ -27,8 +27,8 @@
println!("Testing synchronous exception handling by causing a page fault");
println!("-------------------------------------------------------------------\n");
- println!("Writing beyond mapped area to address 9 GiB...");
- let big_addr: u64 = 9 * 1024 * 1024 * 1024;
+ println!("Writing to bottom of address space to address 1 GiB...");
+ let big_addr: u64 = 1 * 1024 * 1024 * 1024;
core::ptr::read_volatile(big_addr as *mut u64);
// If execution reaches here, the memory access above did not cause a page fault exception.
diff -uNr 15_virtual_mem_part3_precomputed_tables/translation_table_tool/bsp.rb 16_virtual_mem_part4_higher_half_kernel/translation_table_tool/bsp.rb
--- 15_virtual_mem_part3_precomputed_tables/translation_table_tool/bsp.rb
+++ 16_virtual_mem_part4_higher_half_kernel/translation_table_tool/bsp.rb
@@ -31,7 +31,7 @@
symbols = `#{NM_BINARY} --demangle #{kernel_elf}`.split("\n")
@kernel_virt_addr_space_size = parse_from_symbols(symbols, /__kernel_virt_addr_space_size/)
- @kernel_virt_start_addr = 0
+ @kernel_virt_start_addr = parse_from_symbols(symbols, /__kernel_virt_start_addr/)
@virt_addresses = parse_from_symbols(symbols, @virt_addresses)
@phys_addresses = virt_to_phys(@virt_addresses)
```

@ -0,0 +1,8 @@
use std::env;

/// Cargo build script: re-run the build when the linker script or this file changes.
///
/// Panics if `LINKER_FILE` is not set in the build environment.
fn main() {
    let linker_file = env::var("LINKER_FILE").unwrap();

    // Emit one rerun-if-changed directive per tracked dependency, in the original order.
    for dep in &[linker_file.as_str(), "build.rs"] {
        println!("cargo:rerun-if-changed={}", dep);
    }
}

@ -0,0 +1,49 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural processor code.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::cpu::arch_cpu
use cortex_a::asm;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
pub use asm::nop;
/// Pause execution on the core.
///
/// `wfe` puts the core into a low-power wait; looping over it makes this function diverge.
#[inline(always)]
pub fn wait_forever() -> ! {
    loop {
        asm::wfe()
    }
}

//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------

#[cfg(feature = "test_build")]
use qemu_exit::QEMUExit;

// Handle used to make the emulating QEMU process exit (test builds only).
#[cfg(feature = "test_build")]
const QEMU_EXIT_HANDLE: qemu_exit::AArch64 = qemu_exit::AArch64::new();

/// Make the host QEMU binary execute `exit(1)`.
#[cfg(feature = "test_build")]
pub fn qemu_exit_failure() -> ! {
    QEMU_EXIT_HANDLE.exit_failure()
}

/// Make the host QEMU binary execute `exit(0)`.
#[cfg(feature = "test_build")]
pub fn qemu_exit_success() -> ! {
    QEMU_EXIT_HANDLE.exit_success()
}

@ -0,0 +1,97 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural boot code.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::cpu::boot::arch_boot
use crate::{cpu, memory, memory::Address};
use core::intrinsics::unlikely;
use cortex_a::{asm, regs::*};
// Assembly counterpart to this file.
global_asm!(include_str!("boot.s"));
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Prepares the transition from EL2 to EL1.
///
/// # Safety
///
/// - The `bss` section is not initialized yet. The code must not use or reference it in any way.
/// - The HW state of EL1 must be prepared in a sound way.
#[inline(always)]
unsafe fn prepare_el2_to_el1_transition(
    virt_boot_core_stack_end_exclusive_addr: u64,
    virt_runtime_init_addr: u64,
) {
    // Enable timer counter registers for EL1.
    CNTHCTL_EL2.write(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);

    // No offset for reading the counters.
    CNTVOFF_EL2.set(0);

    // Set EL1 execution state to AArch64.
    HCR_EL2.write(HCR_EL2::RW::EL1IsAarch64);

    // Set up a simulated exception return.
    //
    // First, fake a saved program status where all interrupts were masked and SP_EL1 was used as a
    // stack pointer.
    SPSR_EL2.write(
        SPSR_EL2::D::Masked
            + SPSR_EL2::A::Masked
            + SPSR_EL2::I::Masked
            + SPSR_EL2::F::Masked
            + SPSR_EL2::M::EL1h,
    );

    // Second, let the link register point to runtime_init(). `eret` will continue execution there.
    // This is a virtual address, so the MMU must be enabled before the return happens.
    ELR_EL2.set(virt_runtime_init_addr);

    // Set up SP_EL1 (stack pointer), which will be used by EL1 once we "return" to it. Since there
    // are no plans to ever return to EL2, just re-use the same stack.
    SP_EL1.set(virt_boot_core_stack_end_exclusive_addr);
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// The Rust entry of the `kernel` binary.
///
/// The function is called from the assembly `_start` function.
///
/// # Safety
///
/// - The `bss` section is not initialized yet. The code must not use or reference it in any way.
/// - Exception return from EL2 must continue execution in EL1 with `runtime_init()`.
#[no_mangle]
pub unsafe extern "C" fn _start_rust(
    phys_kernel_tables_base_addr: u64,
    virt_boot_core_stack_end_exclusive_addr: u64,
    virt_runtime_init_addr: u64,
) -> ! {
    prepare_el2_to_el1_transition(
        virt_boot_core_stack_end_exclusive_addr,
        virt_runtime_init_addr,
    );

    // Turn on the MMU for EL1. Must happen before `eret`, because ELR_EL2 holds a virtual
    // address. On failure there is nothing sensible left to do, so park the core.
    let addr = Address::new(phys_kernel_tables_base_addr as usize);
    if unlikely(memory::mmu::enable_mmu_and_caching(addr).is_err()) {
        cpu::wait_forever();
    }

    // Use `eret` to "return" to EL1. Since virtual memory will already be enabled, this results in
    // execution of runtime_init() in EL1 from its _virtual address_.
    asm::eret()
}

@ -0,0 +1,87 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
	adrp	\register, \symbol
	add	\register, \register, #:lo12:\symbol
.endm

// Load the address of a symbol into a register, absolute.
//
// Built from four 16-bit immediates (movz + 3x movk), so any 64-bit address can be materialized.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_ABS register, symbol
	movz	\register, #:abs_g3:\symbol
	movk	\register, #:abs_g2_nc:\symbol
	movk	\register, #:abs_g1_nc:\symbol
	movk	\register, #:abs_g0_nc:\symbol
.endm

// CurrentEL value for EL2 (the EL is encoded in bits [3:2], hence 0x8).
.equ _EL2, 0x8
// Mask for the core id bits in MPIDR_EL1.
.equ _core_id_mask, 0b11
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
.section .text._start

//------------------------------------------------------------------------------
// fn _start()
//------------------------------------------------------------------------------
_start:
	// Only proceed if the core executes in EL2. Park it otherwise.
	mrs	x0, CurrentEL
	cmp	x0, _EL2
	b.ne	1f

	// Only proceed on the boot core. Park it otherwise.
	mrs	x1, MPIDR_EL1
	and	x1, x1, _core_id_mask
	ldr	x2, BOOT_CORE_ID      // provided by bsp/__board_name__/cpu.rs
	cmp	x1, x2
	b.ne	1f

	// If execution reaches here, it is the boot core. Now, prepare the jump to Rust code.

	// Load the base address of the kernel's translation tables.
	ldr	x0, PHYS_KERNEL_TABLES_BASE_ADDR // provided by bsp/__board_name__/memory/mmu.rs

	// Load the _absolute_ addresses of the following symbols. Since the kernel is linked at
	// the top of the 64 bit address space, these are effectively virtual addresses.
	ADR_ABS	x1, __boot_core_stack_end_exclusive
	ADR_ABS	x2, runtime_init

	// Load the PC-relative address of the stack and set the stack pointer.
	//
	// Since _start() is the first function that runs after the firmware has loaded the kernel
	// into memory, retrieving this symbol PC-relative returns the "physical" address.
	//
	// Setting the stack pointer to this value ensures that anything that still runs in EL2,
	// until the kernel returns to EL1 with the MMU enabled, works as well. After the return to
	// EL1, the virtual address of the stack retrieved above will be used.
	ADR_REL	x4, __boot_core_stack_end_exclusive
	mov	sp, x4

	// Jump to Rust code. x0, x1 and x2 hold the function arguments provided to _start_rust().
	b	_start_rust

	// Infinitely wait for events (aka "park the core").
1:	wfe
	b	1b

.size	_start, . - _start
.type	_start, function
.global	_start

@ -0,0 +1,29 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural symmetric multiprocessing.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::cpu::smp::arch_smp
use cortex_a::regs::*;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return the executing core's id (lowest two bits of MPIDR_EL1).
#[inline(always)]
pub fn core_id<T>() -> T
where
    T: From<u8>,
{
    // Only the two least-significant bits are relevant here, matching the mask used at boot.
    let raw = MPIDR_EL1.get() & 0b11;

    T::from(raw as u8)
}

@ -0,0 +1,281 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural synchronous and asynchronous exception handling.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::exception::arch_exception
use crate::{
bsp::{self},
exception,
memory::Address,
};
use core::{cell::UnsafeCell, fmt};
use cortex_a::{barrier, regs::*};
use register::InMemoryRegister;
// Assembly counterpart to this file.
global_asm!(include_str!("exception.s"));
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Wrapper struct for memory copy of SPSR_EL1.
#[repr(transparent)]
struct SpsrEL1(InMemoryRegister<u64, SPSR_EL1::Register>);

/// The exception context as it is stored on the stack on exception entry.
///
/// `#[repr(C)]` because the layout must match the store sequence in `exception.s` exactly.
#[repr(C)]
struct ExceptionContext {
    /// General Purpose Registers x0 - x29.
    gpr: [u64; 30],

    /// The link register, aka x30.
    lr: u64,

    /// Exception link register. The program counter at the time the exception happened.
    elr_el1: u64,

    /// Saved program status.
    spsr_el1: SpsrEL1,
}

/// Wrapper struct for pretty printing ESR_EL1.
struct EsrEL1;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Check if additional context can be derived from a data abort.
///
/// Currently only detects faults that hit the boot core stack's guard page.
fn inspect_data_abort(f: &mut fmt::Formatter) -> fmt::Result {
    // FAR_EL1 holds the faulting virtual address.
    let fault_addr = Address::new(FAR_EL1.get() as usize);

    let guard_page = bsp::memory::mmu::virt_boot_core_stack_guard_page_desc();
    if !guard_page.contains(fault_addr) {
        return Ok(());
    }

    writeln!(
        f,
        "\n\n >> Attempted to access the guard page of the kernel's boot core stack <<"
    )
}
/// Prints verbose information about the exception and then panics.
///
/// Catch-all used by every vector that has no dedicated handling. Prints the fault address
/// register, a decoded ESR_EL1 and the full saved exception context.
fn default_exception_handler(e: &ExceptionContext) {
    panic!(
        "\n\nCPU Exception!\n\
        FAR_EL1: {:#018x}\n\
        {}\n\
        {}",
        FAR_EL1.get(),
        EsrEL1 {},
        e
    );
}
//------------------------------------------------------------------------------
// Current, EL0
//------------------------------------------------------------------------------

// The kernel never runs with SP_EL0, so taking any of these vectors is a programming error.
#[no_mangle]
unsafe extern "C" fn current_el0_synchronous(_e: &mut ExceptionContext) {
    panic!("Should not be here. Use of SP_EL0 in EL1 is not supported.")
}

#[no_mangle]
unsafe extern "C" fn current_el0_irq(_e: &mut ExceptionContext) {
    panic!("Should not be here. Use of SP_EL0 in EL1 is not supported.")
}

#[no_mangle]
unsafe extern "C" fn current_el0_serror(_e: &mut ExceptionContext) {
    panic!("Should not be here. Use of SP_EL0 in EL1 is not supported.")
}

//------------------------------------------------------------------------------
// Current, ELx
//------------------------------------------------------------------------------

#[no_mangle]
unsafe extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

// IRQs are the only exceptions with dedicated handling: dispatch to the BSP's IRQ manager.
#[no_mangle]
unsafe extern "C" fn current_elx_irq(_e: &mut ExceptionContext) {
    use exception::asynchronous::interface::IRQManager;

    let token = &exception::asynchronous::IRQContext::new();
    bsp::exception::asynchronous::irq_manager().handle_pending_irqs(token);
}

#[no_mangle]
unsafe extern "C" fn current_elx_serror(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

//------------------------------------------------------------------------------
// Lower, AArch64
//------------------------------------------------------------------------------

#[no_mangle]
unsafe extern "C" fn lower_aarch64_synchronous(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

#[no_mangle]
unsafe extern "C" fn lower_aarch64_irq(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

#[no_mangle]
unsafe extern "C" fn lower_aarch64_serror(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

//------------------------------------------------------------------------------
// Lower, AArch32
//------------------------------------------------------------------------------

#[no_mangle]
unsafe extern "C" fn lower_aarch32_synchronous(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

#[no_mangle]
unsafe extern "C" fn lower_aarch32_irq(e: &mut ExceptionContext) {
    default_exception_handler(e);
}

#[no_mangle]
unsafe extern "C" fn lower_aarch32_serror(e: &mut ExceptionContext) {
    default_exception_handler(e);
}
//------------------------------------------------------------------------------
// Pretty printing
//------------------------------------------------------------------------------
/// Human readable ESR_EL1.
#[rustfmt::skip]
impl fmt::Display for EsrEL1 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Read the register once into a local copy and decode from there.
        let esr_el1 = ESR_EL1.extract();

        // Raw print of whole register.
        writeln!(f, "ESR_EL1: {:#010x}", esr_el1.get())?;

        // Raw print of exception class.
        write!(f, " Exception Class (EC) : {:#x}", esr_el1.read(ESR_EL1::EC))?;

        // Exception class, translation. Only the class relevant for this kernel is decoded.
        let ec_translation = match esr_el1.read_as_enum(ESR_EL1::EC) {
            Some(ESR_EL1::EC::Value::DataAbortCurrentEL) => "Data Abort, current EL",
            _ => "N/A",
        };
        writeln!(f, " - {}", ec_translation)?;

        // Raw print of instruction specific syndrome.
        write!(f, " Instr Specific Syndrome (ISS): {:#x}", esr_el1.read(ESR_EL1::ISS))?;

        inspect_data_abort(f)
    }
}
/// Human readable SPSR_EL1.
#[rustfmt::skip]
impl fmt::Display for SpsrEL1 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Raw value.
        writeln!(f, "SPSR_EL1: {:#010x}", self.0.get())?;

        // Condition flags (NZCV).
        let to_flag_str = |x| -> _ {
            if x { "Set" } else { "Not set" }
        };

        writeln!(f, " Flags:")?;
        writeln!(f, " Negative (N): {}", to_flag_str(self.0.is_set(SPSR_EL1::N)))?;
        writeln!(f, " Zero (Z): {}", to_flag_str(self.0.is_set(SPSR_EL1::Z)))?;
        writeln!(f, " Carry (C): {}", to_flag_str(self.0.is_set(SPSR_EL1::C)))?;
        writeln!(f, " Overflow (V): {}", to_flag_str(self.0.is_set(SPSR_EL1::V)))?;

        // Exception mask bits (DAIF).
        let to_mask_str = |x| -> _ {
            if x { "Masked" } else { "Unmasked" }
        };

        writeln!(f, " Exception handling state:")?;
        writeln!(f, " Debug (D): {}", to_mask_str(self.0.is_set(SPSR_EL1::D)))?;
        writeln!(f, " SError (A): {}", to_mask_str(self.0.is_set(SPSR_EL1::A)))?;
        writeln!(f, " IRQ (I): {}", to_mask_str(self.0.is_set(SPSR_EL1::I)))?;
        writeln!(f, " FIQ (F): {}", to_mask_str(self.0.is_set(SPSR_EL1::F)))?;

        write!(f, " Illegal Execution State (IL): {}",
            to_flag_str(self.0.is_set(SPSR_EL1::IL))
        )
    }
}
/// Human readable print of the exception context.
impl fmt::Display for ExceptionContext {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "ELR_EL1: {:#018x}", self.elr_el1)?;
        writeln!(f, "{}", self.spsr_el1)?;
        writeln!(f)?;
        writeln!(f, "General purpose register:")?;

        // Two registers per line: even indices get a column separator, odd ones a newline.
        for (i, reg) in self.gpr.iter().enumerate() {
            let sep = if i % 2 == 0 { " " } else { "\n" };
            write!(f, " x{: <2}: {: >#018x}{}", i, reg, sep)?;
        }

        write!(f, " lr : {:#018x}", self.lr)
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use crate::exception::PrivilegeLevel;

/// The processing element's current privilege level.
///
/// Reads the CurrentEL system register and returns the decoded level together with a
/// human-readable name.
pub fn current_privilege_level() -> (PrivilegeLevel, &'static str) {
    let el = CurrentEL.read_as_enum(CurrentEL::EL);
    match el {
        Some(CurrentEL::EL::Value::EL2) => (PrivilegeLevel::Hypervisor, "EL2"),
        Some(CurrentEL::EL::Value::EL1) => (PrivilegeLevel::Kernel, "EL1"),
        Some(CurrentEL::EL::Value::EL0) => (PrivilegeLevel::User, "EL0"),
        _ => (PrivilegeLevel::Unknown, "Unknown"),
    }
}
/// Init exception handling by setting the exception vector base address register.
///
/// # Safety
///
/// - Changes the HW state of the executing core.
/// - The vector table and the symbol `__exception_vector_table_start` from the linker script must
///   adhere to the alignment and size constraints demanded by the ARMv8-A Architecture Reference
///   Manual.
pub unsafe fn handling_init() {
    // Provided by exception.S.
    extern "Rust" {
        static __exception_vector_start: UnsafeCell<()>;
    }

    VBAR_EL1.set(__exception_vector_start.get() as u64);

    // Force VBAR update to complete before next instruction.
    barrier::isb(barrier::SY);
}

@ -0,0 +1,148 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//--------------------------------------------------------------------------------------------------
// Definitions
//--------------------------------------------------------------------------------------------------
/// Call the function provided by parameter `\handler` after saving the exception context. Provide
/// the context as the first parameter to '\handler'.
.macro CALL_WITH_CONTEXT handler
	// Make room on the stack for the exception context (17 * 16 bytes).
	sub	sp,  sp,  #16 * 17

	// Store all general purpose registers on the stack.
	stp	x0,  x1,  [sp, #16 * 0]
	stp	x2,  x3,  [sp, #16 * 1]
	stp	x4,  x5,  [sp, #16 * 2]
	stp	x6,  x7,  [sp, #16 * 3]
	stp	x8,  x9,  [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	// Add the exception link register (ELR_EL1) and the saved program status (SPSR_EL1).
	// x1 and x2 were already saved above, so they are free to use as scratch here.
	mrs	x1,  ELR_EL1
	mrs	x2,  SPSR_EL1

	stp	lr,  x1,  [sp, #16 * 15]
	str	x2,  [sp, #16 * 16]

	// x0 is the first argument for the function called through `\handler`.
	mov	x0,  sp

	// Call `\handler`.
	bl	\handler

	// After returning from exception handling code, replay the saved context and return via
	// `eret`.
	b	__exception_restore_context
.endm

// Park the core on FIQ.
.macro FIQ_SUSPEND
1:	wfe
	b	1b
.endm
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
.section .text

//------------------------------------------------------------------------------
// The exception vector table.
//------------------------------------------------------------------------------

// Align by 2^11 bytes, as demanded by ARMv8-A. Same as ALIGN(2048) in an ld script.
.align 11

// Export a symbol for the Rust code to use.
__exception_vector_start:

// Current exception level with SP_EL0.
//
// .org sets the offset relative to section start.
//
// # Safety
//
// - It must be ensured that `CALL_WITH_CONTEXT` <= 0x80 bytes.
.org 0x000
	CALL_WITH_CONTEXT	current_el0_synchronous
.org 0x080
	CALL_WITH_CONTEXT	current_el0_irq
.org 0x100
	FIQ_SUSPEND
.org 0x180
	CALL_WITH_CONTEXT	current_el0_serror

// Current exception level with SP_ELx, x > 0.
.org 0x200
	CALL_WITH_CONTEXT	current_elx_synchronous
.org 0x280
	CALL_WITH_CONTEXT	current_elx_irq
.org 0x300
	FIQ_SUSPEND
.org 0x380
	CALL_WITH_CONTEXT	current_elx_serror

// Lower exception level, AArch64
.org 0x400
	CALL_WITH_CONTEXT	lower_aarch64_synchronous
.org 0x480
	CALL_WITH_CONTEXT	lower_aarch64_irq
.org 0x500
	FIQ_SUSPEND
.org 0x580
	CALL_WITH_CONTEXT	lower_aarch64_serror

// Lower exception level, AArch32
.org 0x600
	CALL_WITH_CONTEXT	lower_aarch32_synchronous
.org 0x680
	CALL_WITH_CONTEXT	lower_aarch32_irq
.org 0x700
	FIQ_SUSPEND
.org 0x780
	CALL_WITH_CONTEXT	lower_aarch32_serror
.org 0x800

//------------------------------------------------------------------------------
// fn __exception_restore_context()
//------------------------------------------------------------------------------
__exception_restore_context:
	// Restore SPSR_EL1 and ELR_EL1 first, using x19/x20 as scratch; both are re-loaded with
	// their saved values further down. NOTE(review): the 32-bit load of SPSR (w19) assumes the
	// upper 32 bits are irrelevant — confirm against the ARM ARM.
	ldr	w19,      [sp, #16 * 16]
	ldp	lr,  x20, [sp, #16 * 15]

	msr	SPSR_EL1, x19
	msr	ELR_EL1,  x20

	ldp	x0,  x1,  [sp, #16 * 0]
	ldp	x2,  x3,  [sp, #16 * 1]
	ldp	x4,  x5,  [sp, #16 * 2]
	ldp	x6,  x7,  [sp, #16 * 3]
	ldp	x8,  x9,  [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	// Undo the stack frame allocated on exception entry, then return from the exception.
	add	sp,  sp,  #16 * 17

	eret

.size	__exception_restore_context, . - __exception_restore_context
.type	__exception_restore_context, function

@ -0,0 +1,150 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural asynchronous exception handling.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::exception::asynchronous::arch_asynchronous
use cortex_a::regs::*;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
mod daif_bits {
    /// Bit pattern for the I (IRQ) flag as consumed by the `msr DAIFSet/DAIFClr` instructions.
    pub const IRQ: u8 = 0b0010;
}

/// Maps an exception type marker to its corresponding field in the DAIF register.
trait DaifField {
    fn daif_field() -> register::Field<u64, DAIF::Register>;
}

// Marker types, one per DAIF exception kind.
struct Debug;
struct SError;
struct IRQ;
struct FIQ;

//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------

impl DaifField for Debug {
    fn daif_field() -> register::Field<u64, DAIF::Register> {
        DAIF::D
    }
}

impl DaifField for SError {
    fn daif_field() -> register::Field<u64, DAIF::Register> {
        DAIF::A
    }
}

impl DaifField for IRQ {
    fn daif_field() -> register::Field<u64, DAIF::Register> {
        DAIF::I
    }
}

impl DaifField for FIQ {
    fn daif_field() -> register::Field<u64, DAIF::Register> {
        DAIF::F
    }
}

/// Check whether the given exception type is currently masked in DAIF.
fn is_masked<T>() -> bool
where
    T: DaifField,
{
    DAIF.is_set(T::daif_field())
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Returns whether IRQs are masked on the executing core.
pub fn is_local_irq_masked() -> bool {
    !is_masked::<IRQ>()
}

/// Unmask IRQs on the executing core.
///
/// It is not needed to place an explicit instruction synchronization barrier after the `msr`.
/// Quoting the Architecture Reference Manual for ARMv8-A, section C5.1.3:
///
/// "Writes to PSTATE.{PAN, D, A, I, F} occur in program order without the need for additional
/// synchronization."
///
/// # Safety
///
/// - Changes the HW state of the executing core.
#[inline(always)]
pub unsafe fn local_irq_unmask() {
    #[rustfmt::skip]
    asm!(
        "msr DAIFClr, {arg}",
        arg = const daif_bits::IRQ,
        options(nomem, nostack, preserves_flags)
    );
}

/// Mask IRQs on the executing core.
///
/// # Safety
///
/// - Changes the HW state of the executing core.
#[inline(always)]
pub unsafe fn local_irq_mask() {
    #[rustfmt::skip]
    asm!(
        "msr DAIFSet, {arg}",
        arg = const daif_bits::IRQ,
        options(nomem, nostack, preserves_flags)
    );
}

/// Mask IRQs on the executing core and return the previously saved interrupt mask bits (DAIF).
///
/// # Safety
///
/// - Changes the HW state of the executing core.
#[inline(always)]
pub unsafe fn local_irq_mask_save() -> u64 {
    // Read first, then mask - otherwise the saved state would already carry the mask bit.
    let saved = DAIF.get();
    local_irq_mask();

    saved
}

/// Restore the interrupt mask bits (DAIF) using the callee's argument.
///
/// # Safety
///
/// - Changes the HW state of the executing core.
/// - No sanity checks on the input.
#[inline(always)]
pub unsafe fn local_irq_restore(saved: u64) {
    DAIF.set(saved);
}
/// Print the AArch64 exceptions status.
#[rustfmt::skip]
pub fn print_state() {
    use crate::info;

    // Map a DAIF bit state to a human-readable label.
    fn to_mask_str(x: bool) -> &'static str {
        if x { "Masked" } else { "Unmasked" }
    }

    info!(" Debug: {}", to_mask_str(is_masked::<Debug>()));
    info!(" SError: {}", to_mask_str(is_masked::<SError>()));
    info!(" IRQ: {}", to_mask_str(is_masked::<IRQ>()));
    info!(" FIQ: {}", to_mask_str(is_masked::<FIQ>()));
}

@ -0,0 +1,184 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit Driver.
//!
//! Only 64 KiB granule is supported.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::memory::mmu::arch_mmu
use crate::{
bsp, memory,
memory::{mmu::TranslationGranule, Address, Physical, Virtual},
};
use core::intrinsics::unlikely;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Memory Management Unit type.
struct MemoryManagementUnit;

//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------

/// The translation granules this driver deals with: 512 MiB table coverage and 64 KiB pages.
pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>;
pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>;

/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
pub mod mair {
    /// Attribute index for device memory.
    pub const DEVICE: u64 = 0;
    /// Attribute index for cacheable normal DRAM.
    pub const NORMAL: u64 = 1;
}

//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------

static MMU: MemoryManagementUnit = MemoryManagementUnit;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AddressSpace<AS_SIZE> {
    /// Checks for architectural restrictions.
    ///
    /// A `const fn`, so when evaluated in a const context a violation fails the build.
    pub const fn arch_address_space_size_sanity_checks() {
        // Size must be at least one full 512 MiB table.
        assert!((AS_SIZE % Granule512MiB::SIZE) == 0);

        // Check for 48 bit virtual address size as maximum, which is supported by any ARMv8
        // version.
        assert!(AS_SIZE <= (1 << 48));
    }
}
impl MemoryManagementUnit {
    /// Setup function for the MAIR_EL1 register.
    ///
    /// Defines the memory attribute entries referenced by the indices in the `mair` module.
    #[inline(always)]
    fn set_up_mair(&self) {
        // Define the memory types being mapped.
        MAIR_EL1.write(
            // Attribute 1 - Cacheable normal DRAM.
            MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc +
            MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc +

            // Attribute 0 - Device.
            MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck,
        );
    }

    /// Configure various settings of stage 1 of the EL1 translation regime.
    ///
    /// Only the TTBR1 half (higher half) is enabled; TTBR0 walks are explicitly disabled.
    #[inline(always)]
    fn configure_translation_control(&self) {
        // T1SZ determines the size of the TTBR1 region, counted down from the top of the
        // 64 bit address space.
        let t1sz = (64 - bsp::memory::mmu::KernelVirtAddrSpace::SIZE_SHIFT) as u64;

        TCR_EL1.write(
            TCR_EL1::TBI1::Used
                + TCR_EL1::IPS::Bits_40
                + TCR_EL1::TG1::KiB_64
                + TCR_EL1::SH1::Inner
                + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
                + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable
                + TCR_EL1::EPD1::EnableTTBR1Walks
                + TCR_EL1::A1::TTBR1
                + TCR_EL1::T1SZ.val(t1sz)
                + TCR_EL1::EPD0::DisableTTBR0Walks,
        );
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the MMU instance.
///
/// The instance itself is a stateless unit struct; all actual state lives in system registers.
pub fn mmu() -> &'static impl memory::mmu::interface::MMU {
    &MMU
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use memory::mmu::{MMUEnableError, TranslationError};
impl memory::mmu::interface::MMU for MemoryManagementUnit {
    /// Turn the MMU on with data and instruction caching enabled.
    ///
    /// Returns `AlreadyEnabled` when called while the MMU is on, and `Other` when the HW lacks
    /// 64 KiB translation granule support. `phys_tables_base_addr` must point to valid,
    /// populated translation tables.
    unsafe fn enable_mmu_and_caching(
        &self,
        phys_tables_base_addr: Address<Physical>,
    ) -> Result<(), MMUEnableError> {
        if unlikely(self.is_enabled()) {
            return Err(MMUEnableError::AlreadyEnabled);
        }

        // Fail early if translation granule is not supported.
        if unlikely(!ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported)) {
            return Err(MMUEnableError::Other(
                "Translation granule not supported in HW",
            ));
        }

        // Prepare the memory attribute indirection register.
        self.set_up_mair();

        // Set the "Translation Table Base Register".
        TTBR1_EL1.set_baddr(phys_tables_base_addr.into_usize() as u64);

        self.configure_translation_control();

        // Switch the MMU on.
        //
        // First, force all previous changes to be seen before the MMU is enabled.
        barrier::isb(barrier::SY);

        // Enable the MMU and turn on data and instruction caching.
        SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);

        // Force MMU init to complete before next instruction.
        barrier::isb(barrier::SY);

        Ok(())
    }

    /// Whether the MMU is currently switched on (SCTLR_EL1.M set).
    #[inline(always)]
    fn is_enabled(&self) -> bool {
        SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable)
    }

    /// Translate a virtual address to a physical address using the HW's `AT` instruction.
    ///
    /// Errors when the MMU is disabled or the HW reports a translation abort.
    fn try_virt_to_phys(
        &self,
        virt: Address<Virtual>,
    ) -> Result<Address<Physical>, TranslationError> {
        if !self.is_enabled() {
            return Err(TranslationError::MMUDisabled);
        }

        let addr = virt.into_usize() as u64;

        // AT S1E1R: Address Translate, stage 1, EL1, read access. Result lands in PAR_EL1.
        unsafe {
            asm!(
                "AT S1E1R, {0}",
                in(reg) addr,
                options(readonly, nostack, preserves_flags)
            );
        }

        let par_el1 = PAR_EL1.extract();
        if par_el1.matches_all(PAR_EL1::F::TranslationAborted) {
            return Err(TranslationError::Aborted);
        }

        // Rebuild the full physical address: shift the PA field back up to bit 12 and carry
        // over the low 12 bits of the input address.
        let phys_addr = (par_el1.read(PAR_EL1::PA) << 12) | (addr & 0xFFF);

        Ok(Address::new(phys_addr as usize))
    }
}

@ -0,0 +1,505 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural translation table.
//!
//! Only 64 KiB granule is supported.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::memory::mmu::translation_table::arch_translation_table
use crate::{
bsp, memory,
memory::{
mmu::{
arch_mmu::{Granule512MiB, Granule64KiB},
AccessPermissions, AttributeFields, MemAttributes, Page, PageSliceDescriptor,
},
Address, Physical, Virtual,
},
};
use core::convert::{self, TryInto};
use register::{register_bitfields, InMemoryRegister};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// A table descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-15.
register_bitfields! {u64,
    STAGE1_TABLE_DESCRIPTOR [
        /// Physical address of the next descriptor.
        NEXT_LEVEL_TABLE_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]

        /// Descriptor type: next-level table vs. block mapping. Only tables are used here.
        TYPE OFFSET(1) NUMBITS(1) [
            Block = 0,
            Table = 1
        ],

        /// Validity bit.
        VALID OFFSET(0) NUMBITS(1) [
            False = 0,
            True = 1
        ]
    ]
}
// A level 3 page descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-17.
register_bitfields! {u64,
    STAGE1_PAGE_DESCRIPTOR [
        /// Unprivileged execute-never.
        UXN OFFSET(54) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Privileged execute-never.
        PXN OFFSET(53) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Physical address of the next table descriptor (lvl2) or the page descriptor (lvl3).
        OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]

        /// Access flag.
        AF OFFSET(10) NUMBITS(1) [
            False = 0,
            True = 1
        ],

        /// Shareability field.
        SH OFFSET(8) NUMBITS(2) [
            OuterShareable = 0b10,
            InnerShareable = 0b11
        ],

        /// Access Permissions.
        AP OFFSET(6) NUMBITS(2) [
            RW_EL1 = 0b00,
            RW_EL1_EL0 = 0b01,
            RO_EL1 = 0b10,
            RO_EL1_EL0 = 0b11
        ],

        /// Memory attributes index into the MAIR_EL1 register.
        AttrIndx OFFSET(2) NUMBITS(3) [],

        /// Descriptor type.
        TYPE OFFSET(1) NUMBITS(1) [
            Reserved_Invalid = 0,
            Page = 1
        ],

        /// Validity bit.
        VALID OFFSET(0) NUMBITS(1) [
            False = 0,
            True = 1
        ]
    ]
}
/// A table descriptor for 64 KiB aperture.
///
/// The output points to the next table.
#[derive(Copy, Clone)]
#[repr(C)]
struct TableDescriptor {
    // Raw descriptor bits, laid out per STAGE1_TABLE_DESCRIPTOR.
    value: u64,
}

/// A page descriptor with 64 KiB aperture.
///
/// The output points to physical memory.
#[derive(Copy, Clone)]
#[repr(C)]
struct PageDescriptor {
    // Raw descriptor bits, laid out per STAGE1_PAGE_DESCRIPTOR.
    value: u64,
}

/// Helper for reading the virtual start address of a table array.
trait StartAddr {
    fn virt_start_addr(&self) -> Address<Virtual>;
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB
/// aligned, so the lvl3 is put first.
///
/// `NUM_TABLES` is the number of 512 MiB lvl2 windows covered; `START_FROM_TOP` selects whether
/// the covered virtual range is anchored at the top or the bottom of the address space.
#[repr(C)]
#[repr(align(65536))]
pub struct FixedSizeTranslationTable<const NUM_TABLES: usize, const START_FROM_TOP: bool> {
    /// Page descriptors, covering 64 KiB windows per entry.
    lvl3: [[PageDescriptor; 8192]; NUM_TABLES],

    /// Table descriptors, covering 512 MiB windows.
    lvl2: [TableDescriptor; NUM_TABLES],

    /// Index of the next free MMIO page.
    cur_l3_mmio_index: usize,

    /// Have the tables been initialized?
    initialized: bool,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl<T, const N: usize> StartAddr for [T; N] {
    /// The array's first element address, interpreted as a virtual address.
    fn virt_start_addr(&self) -> Address<Virtual> {
        let addr = self.as_ptr() as usize;

        Address::new(addr)
    }
}
impl TableDescriptor {
    /// Create an instance.
    ///
    /// Descriptor is invalid by default.
    pub const fn new_zeroed() -> Self {
        Self { value: 0 }
    }

    /// Create an instance pointing to the supplied address.
    pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: Address<Physical>) -> Self {
        let val = InMemoryRegister::<u64, STAGE1_TABLE_DESCRIPTOR::Register>::new(0);

        // The field stores the address with its 64 KiB alignment bits (low 16) stripped.
        let shifted = phys_next_lvl_table_addr.into_usize() >> Granule64KiB::SHIFT;
        val.write(
            STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64)
                + STAGE1_TABLE_DESCRIPTOR::TYPE::Table
                + STAGE1_TABLE_DESCRIPTOR::VALID::True,
        );

        TableDescriptor { value: val.get() }
    }
}
/// Convert the kernel's generic memory attributes to HW-specific attributes of the MMU.
impl convert::From<AttributeFields>
for register::FieldValue<u64, STAGE1_PAGE_DESCRIPTOR::Register>
{
fn from(attribute_fields: AttributeFields) -> Self {
// Memory attributes.
let mut desc = match attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => {
STAGE1_PAGE_DESCRIPTOR::SH::InnerShareable
+ STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(memory::mmu::arch_mmu::mair::NORMAL)
}
MemAttributes::Device => {
STAGE1_PAGE_DESCRIPTOR::SH::OuterShareable
+ STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(memory::mmu::arch_mmu::mair::DEVICE)
}
};
// Access Permissions.
desc += match attribute_fields.acc_perms {
AccessPermissions::ReadOnly => STAGE1_PAGE_DESCRIPTOR::AP::RO_EL1,
AccessPermissions::ReadWrite => STAGE1_PAGE_DESCRIPTOR::AP::RW_EL1,
};
// The execute-never attribute is mapped to PXN in AArch64.
desc += if attribute_fields.execute_never {
STAGE1_PAGE_DESCRIPTOR::PXN::True
} else {
STAGE1_PAGE_DESCRIPTOR::PXN::False
};
// Always set unprivileged exectue-never as long as userspace is not implemented yet.
desc += STAGE1_PAGE_DESCRIPTOR::UXN::True;
desc
}
}
impl PageDescriptor {
    /// Create an instance.
    ///
    /// Descriptor is invalid by default.
    pub const fn new_zeroed() -> Self {
        Self { value: 0 }
    }

    /// Create an instance.
    ///
    /// Encodes the (64 KiB aligned) output address plus the supplied attributes and marks the
    /// descriptor as a valid, accessed page.
    pub fn from_output_addr(
        phys_output_addr: *const Page<Physical>,
        attribute_fields: &AttributeFields,
    ) -> Self {
        let val = InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(0);

        // Strip the 64 KiB alignment bits; the field stores address bits [47:16].
        let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT;
        val.write(
            STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted)
                + STAGE1_PAGE_DESCRIPTOR::AF::True
                + STAGE1_PAGE_DESCRIPTOR::TYPE::Page
                + STAGE1_PAGE_DESCRIPTOR::VALID::True
                + attribute_fields.clone().into(),
        );

        Self { value: val.get() }
    }

    /// Returns the valid bit.
    fn is_valid(&self) -> bool {
        InMemoryRegister::<u64, STAGE1_PAGE_DESCRIPTOR::Register>::new(self.value)
            .is_set(STAGE1_PAGE_DESCRIPTOR::VALID)
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<const AS_SIZE: usize> memory::mmu::AssociatedTranslationTable
    for memory::mmu::AddressSpace<AS_SIZE>
where
    [u8; Self::SIZE >> Granule512MiB::SHIFT]: Sized,
{
    // One lvl2 entry covers 512 MiB, hence NUM_TABLES = address space size / 512 MiB.
    type TableStartFromTop =
        FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, true>;

    type TableStartFromBottom =
        FixedSizeTranslationTable<{ Self::SIZE >> Granule512MiB::SHIFT }, false>;
}
impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
    FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
{
    // Reserve the last 256 MiB of the address space for MMIO mappings.
    const L2_MMIO_START_INDEX: usize = NUM_TABLES - 1;
    const L3_MMIO_START_INDEX: usize = 8192 / 2;

    // Offset added to/subtracted from virtual addresses when the table is anchored at the top
    // of the address space, so index math can stay zero-based.
    const START_FROM_TOP_OFFSET: Address<Virtual> =
        Address::new((usize::MAX - (Granule512MiB::SIZE * NUM_TABLES)) + 1);

    /// Create an instance.
    ///
    /// `for_precompute` sets the `initialized` flag up front, i.e. `init()` becomes a no-op for
    /// such an instance.
    #[allow(clippy::assertions_on_constants)]
    const fn _new(for_precompute: bool) -> Self {
        // This table layout hard-codes the 64 KiB granule (8192 entries per lvl3 table).
        assert!(bsp::memory::mmu::KernelGranule::SIZE == Granule64KiB::SIZE);

        // Can't have a zero-sized address space.
        assert!(NUM_TABLES > 0);

        Self {
            lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES],
            lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES],
            cur_l3_mmio_index: Self::L3_MMIO_START_INDEX,
            initialized: for_precompute,
        }
    }

    /// Create an instance that is considered initialized from the start.
    pub const fn new_for_precompute() -> Self {
        Self::_new(true)
    }

    /// Create an instance that must be `init()`'ed at runtime. Test builds only.
    #[cfg(test)]
    pub fn new_for_runtime() -> Self {
        Self::_new(false)
    }

    /// The start address of the table's MMIO range.
    #[inline(always)]
    fn mmio_start_addr(&self) -> Address<Virtual> {
        let mut addr = Address::new(
            (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
                | (Self::L3_MMIO_START_INDEX << Granule64KiB::SHIFT),
        );

        if START_FROM_TOP {
            addr += Self::START_FROM_TOP_OFFSET;
        }

        addr
    }

    /// The inclusive end address of the table's MMIO range.
    #[inline(always)]
    fn mmio_end_addr_inclusive(&self) -> Address<Virtual> {
        // Last page (lvl3 index 8191) of the last lvl2 window, plus the full page offset.
        let mut addr = Address::new(
            (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
                | (8191 << Granule64KiB::SHIFT)
                | (Granule64KiB::SIZE - 1),
        );

        if START_FROM_TOP {
            addr += Self::START_FROM_TOP_OFFSET;
        }

        addr
    }

    /// Helper to calculate the lvl2 and lvl3 indices from an address.
    #[inline(always)]
    fn lvl2_lvl3_index_from(
        &self,
        addr: *const Page<Virtual>,
    ) -> Result<(usize, usize), &'static str> {
        let mut addr = addr as usize;

        // Normalize top-anchored addresses to zero-based before computing indices.
        if START_FROM_TOP {
            addr -= Self::START_FROM_TOP_OFFSET.into_usize()
        }

        let lvl2_index = addr >> Granule512MiB::SHIFT;
        let lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT;

        if lvl2_index > (NUM_TABLES - 1) {
            return Err("Virtual page is out of bounds of translation table");
        }

        Ok((lvl2_index, lvl3_index))
    }

    /// Returns the PageDescriptor corresponding to the supplied Page.
    #[inline(always)]
    fn page_descriptor_from(
        &mut self,
        addr: *const Page<Virtual>,
    ) -> Result<&mut PageDescriptor, &'static str> {
        let (lvl2_index, lvl3_index) = self.lvl2_lvl3_index_from(addr)?;

        Ok(&mut self.lvl3[lvl2_index][lvl3_index])
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl<const NUM_TABLES: usize, const START_FROM_TOP: bool>
    memory::mmu::translation_table::interface::TranslationTable
    for FixedSizeTranslationTable<NUM_TABLES, START_FROM_TOP>
{
    /// Populate the lvl2 entries so each points to its corresponding lvl3 table.
    ///
    /// Idempotent: returns `Ok` immediately if the tables are already initialized.
    fn init(&mut self) -> Result<(), &'static str> {
        if self.initialized {
            return Ok(());
        }

        // Populate the l2 entries.
        for (lvl2_nr, lvl2_entry) in self.lvl2.iter_mut().enumerate() {
            let addr = self.lvl3[lvl2_nr]
                .virt_start_addr()
                .try_into()
                .map_err(|_| "Translation error")?;

            let desc = TableDescriptor::from_next_lvl_table_addr(addr);
            *lvl2_entry = desc;
        }

        self.cur_l3_mmio_index = Self::L3_MMIO_START_INDEX;
        self.initialized = true;

        Ok(())
    }

    /// Map the supplied physical pages to the supplied virtual pages.
    ///
    /// Errors on slice length mismatch, physical addresses past the end of the physical address
    /// space, or virtual pages that are already mapped. Empty slices are a no-op.
    unsafe fn map_pages_at(
        &mut self,
        virt_pages: &PageSliceDescriptor<Virtual>,
        phys_pages: &PageSliceDescriptor<Physical>,
        attr: &AttributeFields,
    ) -> Result<(), &'static str> {
        // Use assert! instead of assert_eq!(x, true) (clippy::bool_assert_comparison).
        assert!(self.initialized, "Translation tables not initialized");

        let p = phys_pages.as_slice();
        let v = virt_pages.as_slice();

        // No work to do for empty slices.
        if v.is_empty() {
            return Ok(());
        }

        if v.len() != p.len() {
            return Err("Tried to map page slices with unequal sizes");
        }

        if p.last().unwrap().as_ptr() >= bsp::memory::mmu::phys_addr_space_end_page() {
            return Err("Tried to map outside of physical address space");
        }

        let iter = p.iter().zip(v.iter());
        for (phys_page, virt_page) in iter {
            let page_descriptor = self.page_descriptor_from(virt_page.as_ptr())?;
            if page_descriptor.is_valid() {
                return Err("Virtual page is already mapped");
            }

            // `attr` is already a reference; re-borrowing it was a needless `&&` borrow.
            *page_descriptor = PageDescriptor::from_output_addr(phys_page.as_ptr(), attr);
        }

        Ok(())
    }

    /// Hand out the next free `num_pages`-sized virtual page slice in the MMIO region.
    fn next_mmio_virt_page_slice(
        &mut self,
        num_pages: usize,
    ) -> Result<PageSliceDescriptor<Virtual>, &'static str> {
        assert!(self.initialized, "Translation tables not initialized");

        if num_pages == 0 {
            return Err("num_pages == 0");
        }

        if (self.cur_l3_mmio_index + num_pages) > 8191 {
            return Err("Not enough MMIO space left");
        }

        let mut addr = Address::new(
            (Self::L2_MMIO_START_INDEX << Granule512MiB::SHIFT)
                | (self.cur_l3_mmio_index << Granule64KiB::SHIFT),
        );
        self.cur_l3_mmio_index += num_pages;

        if START_FROM_TOP {
            addr += Self::START_FROM_TOP_OFFSET;
        }

        Ok(PageSliceDescriptor::from_addr(addr, num_pages))
    }

    /// Check if a virtual page slice overlaps the table's MMIO range.
    fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool {
        let start_addr = virt_pages.start_addr();
        let end_addr_inclusive = virt_pages.end_addr_inclusive();

        for i in [start_addr, end_addr_inclusive].iter() {
            if (*i >= self.mmio_start_addr()) && (*i <= self.mmio_end_addr_inclusive()) {
                return true;
            }
        }

        false
    }
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
/// Smallest possible table for unit tests: one 512 MiB window, bottom-anchored.
#[cfg(test)]
pub type MinSizeTranslationTable = FixedSizeTranslationTable<1, false>;

#[cfg(test)]
mod tests {
    use super::*;
    use test_macros::kernel_test;

    /// Check if the size of `struct TableDescriptor` is as expected.
    #[kernel_test]
    fn size_of_tabledescriptor_equals_64_bit() {
        assert_eq!(
            core::mem::size_of::<TableDescriptor>(),
            core::mem::size_of::<u64>()
        );
    }

    /// Check if the size of `struct PageDescriptor` is as expected.
    #[kernel_test]
    fn size_of_pagedescriptor_equals_64_bit() {
        assert_eq!(
            core::mem::size_of::<PageDescriptor>(),
            core::mem::size_of::<u64>()
        );
    }
}

@ -0,0 +1,118 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Architectural timer primitives.
//!
//! # Orientation
//!
//! Since arch modules are imported into generic modules using the path attribute, the path of this
//! file is:
//!
//! crate::time::arch_time
use crate::{time, warn};
use core::time::Duration;
use cortex_a::{barrier, regs::*};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Nanoseconds per second; conversion factor between counter ticks and `Duration`.
const NS_PER_S: u64 = 1_000_000_000;

/// ARMv8 Generic Timer.
struct GenericTimer;

//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------

static TIME_MANAGER: GenericTimer = GenericTimer;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl GenericTimer {
    /// Read the current value of CNTPCT_EL0, the physical counter.
    #[inline(always)]
    fn read_cntpct(&self) -> u64 {
        // Prevent that the counter is read ahead of time due to out-of-order execution.
        unsafe { barrier::isb(barrier::SY) };
        CNTPCT_EL0.get()
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the time manager.
///
/// Hands out the file-global `TIME_MANAGER` singleton.
pub fn time_manager() -> &'static impl time::interface::TimeManager {
    &TIME_MANAGER
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl time::interface::TimeManager for GenericTimer {
    /// The timer's resolution, i.e. the duration of one counter tick.
    fn resolution(&self) -> Duration {
        Duration::from_nanos(NS_PER_S / (CNTFRQ_EL0.get() as u64))
    }

    /// Uptime since power-on, derived from the physical counter.
    fn uptime(&self) -> Duration {
        let ticks = self.read_cntpct();
        let frq: u64 = CNTFRQ_EL0.get() as u64;

        // Convert in two steps. The previous single-step form, `ticks * NS_PER_S / frq`,
        // overflows u64 once `ticks` exceeds ~1.8e10, which happens after only a few minutes
        // of uptime at MHz-range counter frequencies.
        let secs = ticks / frq;
        let sub_second_ticks = ticks % frq;

        // `sub_second_ticks < frq`, so this product stays below u64::MAX for any realistic
        // counter frequency (< ~18 GHz), and the result is < NS_PER_S, i.e. fits in u32.
        let nanos = (sub_second_ticks * NS_PER_S / frq) as u32;

        Duration::new(secs, nanos)
    }

    /// Busy-wait for at least `duration`, using the physical timer's compare-value mechanism.
    ///
    /// Logs a warning and returns without spinning when the requested duration is outside the
    /// architecturally supported range.
    fn spin_for(&self, duration: Duration) {
        // Instantly return on zero.
        if duration.as_nanos() == 0 {
            return;
        }

        // Calculate the register compare value.
        let frq = CNTFRQ_EL0.get();
        let x = match frq.checked_mul(duration.as_nanos() as u64) {
            None => {
                warn!("Spin duration too long, skipping");
                return;
            }
            Some(val) => val,
        };
        let tval = x / NS_PER_S;

        // Check if it is within supported bounds.
        let warn: Option<&str> = if tval == 0 {
            Some("smaller")
        // The upper 32 bits of CNTP_TVAL_EL0 are reserved.
        } else if tval > u32::MAX.into() {
            Some("bigger")
        } else {
            None
        };

        if let Some(w) = warn {
            warn!(
                "Spin duration {} than architecturally supported, skipping",
                w
            );
            return;
        }

        // Set the compare value register.
        CNTP_TVAL_EL0.set(tval);

        // Kick off the counting.                       // Disable timer interrupt.
        CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::SET);

        // ISTATUS will be '1' when cval ticks have passed. Busy-check it.
        while !CNTP_CTL_EL0.matches_all(CNTP_CTL_EL0::ISTATUS::SET) {}

        // Disable counting again.
        CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::CLEAR);
    }
}

@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Conditional reexporting of Board Support Packages.
mod device_driver;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod raspberrypi;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use raspberrypi::*;

@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Device driver.
#[cfg(feature = "bsp_rpi4")]
mod arm;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod bcm;
mod common;
#[cfg(feature = "bsp_rpi4")]
pub use arm::*;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use bcm::*;

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! ARM driver top level.
pub mod gicv2;
pub use gicv2::*;

@ -0,0 +1,248 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! GICv2 Driver - ARM Generic Interrupt Controller v2.
//!
//! The following is a collection of excerpts with useful information from
//! - `Programmer's Guide for ARMv8-A`
//! - `ARM Generic Interrupt Controller Architecture Specification`
//!
//! # Programmer's Guide - 10.6.1 Configuration
//!
//! The GIC is accessed as a memory-mapped peripheral.
//!
//! All cores can access the common Distributor, but the CPU interface is banked, that is, each core
//! uses the same address to access its own private CPU interface.
//!
//! It is not possible for a core to access the CPU interface of another core.
//!
//! # Architecture Specification - 10.6.2 Initialization
//!
//! Both the Distributor and the CPU interfaces are disabled at reset. The GIC must be initialized
//! after reset before it can deliver interrupts to the core.
//!
//! In the Distributor, software must configure the priority, target, security and enable individual
//! interrupts. The Distributor must subsequently be enabled through its control register
//! (GICD_CTLR). For each CPU interface, software must program the priority mask and preemption
//! settings.
//!
//! Each CPU interface block itself must be enabled through its control register (GICD_CTLR). This
//! prepares the GIC to deliver interrupts to the core.
//!
//! Before interrupts are expected in the core, software prepares the core to take interrupts by
//! setting a valid interrupt vector in the vector table, and clearing interrupt mask bits in
//! PSTATE, and setting the routing controls.
//!
//! The entire interrupt mechanism in the system can be disabled by disabling the Distributor.
//! Interrupt delivery to an individual core can be disabled by disabling its CPU interface.
//! Individual interrupts can also be disabled (or enabled) in the distributor.
//!
//! For an interrupt to reach the core, the individual interrupt, Distributor and CPU interface must
//! all be enabled. The interrupt also needs to be of sufficient priority, that is, higher than the
//! core's priority mask.
//!
//! # Architecture Specification - 1.4.2 Interrupt types
//!
//! - Peripheral interrupt
//! - Private Peripheral Interrupt (PPI)
//! - This is a peripheral interrupt that is specific to a single processor.
//! - Shared Peripheral Interrupt (SPI)
//! - This is a peripheral interrupt that the Distributor can route to any of a specified
//! combination of processors.
//!
//! - Software-generated interrupt (SGI)
//! - This is an interrupt generated by software writing to a GICD_SGIR register in the GIC. The
//! system uses SGIs for interprocessor communication.
//! - An SGI has edge-triggered properties. The software triggering of the interrupt is
//! equivalent to the edge transition of the interrupt request signal.
//! - When an SGI occurs in a multiprocessor implementation, the CPUID field in the Interrupt
//! Acknowledge Register, GICC_IAR, or the Aliased Interrupt Acknowledge Register, GICC_AIAR,
//! identifies the processor that requested the interrupt.
//!
//! # Architecture Specification - 2.2.1 Interrupt IDs
//!
//! Interrupts from sources are identified using ID numbers. Each CPU interface can see up to 1020
//! interrupts. The banking of SPIs and PPIs increases the total number of interrupts supported by
//! the Distributor.
//!
//! The GIC assigns interrupt ID numbers ID0-ID1019 as follows:
//! - Interrupt numbers 32..1019 are used for SPIs.
//! - Interrupt numbers 0..31 are used for interrupts that are private to a CPU interface. These
//! interrupts are banked in the Distributor.
//! - A banked interrupt is one where the Distributor can have multiple interrupts with the
//! same ID. A banked interrupt is identified uniquely by its ID number and its associated
//! CPU interface number. Of the banked interrupt IDs:
//! - 00..15 SGIs
//! - 16..31 PPIs
mod gicc;
mod gicd;
use crate::{bsp, cpu, driver, exception, memory, synchronization, synchronization::InitStateLock};
use core::sync::atomic::{AtomicBool, Ordering};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Lookup table of registered IRQ handlers, indexed by IRQ number. `None` = no handler.
type HandlerTable = [Option<exception::asynchronous::IRQDescriptor>; GICv2::NUM_IRQS];

//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------

/// Used for the associated type of trait [`exception::asynchronous::interface::IRQManager`].
pub type IRQNumber = exception::asynchronous::IRQNumber<{ GICv2::MAX_IRQ_NUMBER }>;
/// Representation of the GIC.
pub struct GICv2 {
    /// MMIO descriptor of the Distributor's register block; kept so `init()` can remap it.
    gicd_mmio_descriptor: memory::mmu::MMIODescriptor,

    /// MMIO descriptor of the CPU interface's register block; kept so `init()` can remap it.
    gicc_mmio_descriptor: memory::mmu::MMIODescriptor,

    /// The Distributor.
    gicd: gicd::GICD,

    /// The CPU Interface.
    gicc: gicc::GICC,

    /// Have the MMIO regions been remapped yet?
    is_mmio_remapped: AtomicBool,

    /// Stores registered IRQ handlers. Writable only during kernel init. RO afterwards.
    handler_table: InitStateLock<HandlerTable>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl GICv2 {
    const MAX_IRQ_NUMBER: usize = 300; // Normally 1019, but keep it lower to save some space.
    const NUM_IRQS: usize = Self::MAX_IRQ_NUMBER + 1;

    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide correct MMIO descriptors.
    pub const unsafe fn new(
        gicd_mmio_descriptor: memory::mmu::MMIODescriptor,
        gicc_mmio_descriptor: memory::mmu::MMIODescriptor,
    ) -> Self {
        Self {
            gicd_mmio_descriptor,
            gicc_mmio_descriptor,
            // Start with the descriptors' start addresses; init() re-points both register
            // blocks after kernel_map_mmio() has run.
            gicd: gicd::GICD::new(gicd_mmio_descriptor.start_addr().into_usize()),
            gicc: gicc::GICC::new(gicc_mmio_descriptor.start_addr().into_usize()),
            is_mmio_remapped: AtomicBool::new(false),
            handler_table: InitStateLock::new([None; Self::NUM_IRQS]),
        }
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::ReadWriteEx;
impl driver::interface::DeviceDriver for GICv2 {
    fn compatible(&self) -> &'static str {
        "GICv2 (ARM Generic Interrupt Controller v2)"
    }

    /// Remap the register blocks (first call only), then bring up Distributor and CPU interface.
    unsafe fn init(&self) -> Result<(), &'static str> {
        // Lazily remap GICD/GICC into the kernel's virtual address space on first init.
        let remapped = self.is_mmio_remapped.load(Ordering::Relaxed);
        if !remapped {
            let mut virt_addr;

            // GICD
            virt_addr = memory::mmu::kernel_map_mmio("GICD", &self.gicd_mmio_descriptor)?;
            self.gicd.set_mmio(virt_addr.into_usize());

            // GICC
            virt_addr = memory::mmu::kernel_map_mmio("GICC", &self.gicc_mmio_descriptor)?;
            self.gicc.set_mmio(virt_addr.into_usize());

            // Conclude remapping.
            self.is_mmio_remapped.store(true, Ordering::Relaxed);
        }

        // The Distributor is shared between cores; only the boot core does its one-time setup.
        if bsp::cpu::BOOT_CORE_ID == cpu::smp::core_id() {
            self.gicd.boot_core_init();
        }

        // Per-core CPU interface setup.
        self.gicc.priority_accept_all();
        self.gicc.enable();

        Ok(())
    }
}
impl exception::asynchronous::interface::IRQManager for GICv2 {
    type IRQNumberType = IRQNumber;

    /// Register a handler for the given IRQ number.
    ///
    /// Errors if a handler is already registered for that number.
    fn register_handler(
        &self,
        irq_number: Self::IRQNumberType,
        descriptor: exception::asynchronous::IRQDescriptor,
    ) -> Result<(), &'static str> {
        self.handler_table.write(|table| {
            let irq_number = irq_number.get();

            if table[irq_number].is_some() {
                return Err("IRQ handler already registered");
            }

            table[irq_number] = Some(descriptor);

            Ok(())
        })
    }

    /// Unmask the given IRQ in the Distributor.
    fn enable(&self, irq_number: Self::IRQNumberType) {
        self.gicd.enable(irq_number);
    }

    fn handle_pending_irqs<'irq_context>(
        &'irq_context self,
        ic: &exception::asynchronous::IRQContext<'irq_context>,
    ) {
        // Extract the highest priority pending IRQ number from the Interrupt Acknowledge Register
        // (IAR).
        let irq_number = self.gicc.pending_irq_number(ic);

        // Guard against spurious interrupts.
        if irq_number > GICv2::MAX_IRQ_NUMBER {
            return;
        }

        // Call the IRQ handler. Panic if there is none.
        self.handler_table.read(|table| {
            match table[irq_number] {
                None => panic!("No handler registered for IRQ {}", irq_number),
                Some(descriptor) => {
                    // Call the IRQ handler. Panics on failure.
                    descriptor.handler.handle().expect("Error handling IRQ");
                }
            }
        });

        // Signal completion of handling.
        // NOTE(review): `mark_comleted` [sic] matches the typo'd method name declared in
        // gicc.rs; both sites should be renamed together in a follow-up.
        self.gicc.mark_comleted(irq_number as u32, ic);
    }

    /// Print the registered peripheral (SPI) handlers, skipping the 32 core-private IRQs.
    fn print_handler(&self) {
        use crate::info;

        info!(" Peripheral handler:");

        self.handler_table.read(|table| {
            for (i, opt) in table.iter().skip(32).enumerate() {
                if let Some(handler) = opt {
                    info!(" {: >3}. {}", i + 32, handler.name);
                }
            }
        });
    }
}

@ -0,0 +1,152 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! GICC Driver - GIC CPU interface.
use crate::{
bsp::device_driver::common::MMIODerefWrapper, exception, synchronization::InitStateLock,
};
use register::{mmio::*, register_bitfields, register_structs};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_bitfields! {
    u32,

    /// CPU Interface Control Register
    CTLR [
        Enable OFFSET(0) NUMBITS(1) []
    ],

    /// Interrupt Priority Mask Register
    PMR [
        Priority OFFSET(0) NUMBITS(8) []
    ],

    /// Interrupt Acknowledge Register
    IAR [
        // 10 bits of interrupt ID, matching the module docs' "up to 1020 interrupts".
        InterruptID OFFSET(0) NUMBITS(10) []
    ],

    /// End of Interrupt Register
    EOIR [
        EOIINTID OFFSET(0) NUMBITS(10) []
    ]
}
register_structs! {
    #[allow(non_snake_case)]
    pub RegisterBlock {
        (0x000 => CTLR: ReadWrite<u32, CTLR::Register>),
        (0x004 => PMR: ReadWrite<u32, PMR::Register>),
        // The register at 0x008 is not used by this driver.
        (0x008 => _reserved1),
        (0x00C => IAR: ReadWrite<u32, IAR::Register>),
        (0x010 => EOIR: ReadWrite<u32, EOIR::Register>),
        (0x014 => @END),
    }
}

/// Abstraction for the associated MMIO registers.
type Registers = MMIODerefWrapper<RegisterBlock>;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Representation of the GIC CPU interface.
pub struct GICC {
    /// MMIO register handle. The `InitStateLock` allows re-pointing it via `set_mmio()`.
    registers: InitStateLock<Registers>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use crate::synchronization::interface::ReadWriteEx;
impl GICC {
    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide a correct MMIO start address.
    pub const unsafe fn new(mmio_start_addr: usize) -> Self {
        Self {
            registers: InitStateLock::new(Registers::new(mmio_start_addr)),
        }
    }

    /// Re-point the register block to a new MMIO start address (e.g. after kernel remapping).
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide a correct new MMIO start address.
    pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
        self.registers
            .write(|regs| *regs = Registers::new(new_mmio_start_addr));
    }

    /// Accept interrupts of any priority.
    ///
    /// Quoting the GICv2 Architecture Specification:
    ///
    ///   "Writing 255 to the GICC_PMR always sets it to the largest supported priority field
    ///    value."
    ///
    /// # Safety
    ///
    /// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
    ///   of `&mut self`.
    pub fn priority_accept_all(&self) {
        self.registers.read(|regs| {
            regs.PMR.write(PMR::Priority.val(255)); // Comment in arch spec.
        });
    }

    /// Enable the interface - start accepting IRQs.
    ///
    /// # Safety
    ///
    /// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
    ///   of `&mut self`.
    pub fn enable(&self) {
        self.registers.read(|regs| {
            regs.CTLR.write(CTLR::Enable::SET);
        });
    }

    /// Extract the number of the highest-priority pending IRQ.
    ///
    /// Can only be called from IRQ context, which is ensured by taking an `IRQContext` token.
    ///
    /// # Safety
    ///
    /// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
    ///   of `&mut self`.
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn pending_irq_number<'irq_context>(
        &self,
        _ic: &exception::asynchronous::IRQContext<'irq_context>,
    ) -> usize {
        self.registers
            .read(|regs| regs.IAR.read(IAR::InterruptID) as usize)
    }

    /// Complete handling of the currently active IRQ.
    ///
    /// Can only be called from IRQ context, which is ensured by taking an `IRQContext` token.
    ///
    /// To be called after `pending_irq_number()`.
    ///
    /// NOTE(review): the name is missing a 'p' ("comleted"). It is public interface with a
    /// caller in gicv2.rs, so renaming must touch both sites together in a follow-up.
    ///
    /// # Safety
    ///
    /// - GICC MMIO registers are banked per CPU core. It is therefore safe to have `&self` instead
    ///   of `&mut self`.
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn mark_comleted<'irq_context>(
        &self,
        irq_number: u32,
        _ic: &exception::asynchronous::IRQContext<'irq_context>,
    ) {
        self.registers.read(|regs| {
            regs.EOIR.write(EOIR::EOIINTID.val(irq_number));
        });
    }
}

@ -0,0 +1,205 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! GICD Driver - GIC Distributor.
//!
//! # Glossary
//! - SPI - Shared Peripheral Interrupt.
use crate::{
bsp::device_driver::common::MMIODerefWrapper,
state, synchronization,
synchronization::{IRQSafeNullLock, InitStateLock},
};
use register::{mmio::*, register_bitfields, register_structs};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_bitfields! {
    u32,

    /// Distributor Control Register (GICD_CTLR).
    CTLR [
        /// Global enable for forwarding of pending interrupts to the CPU interfaces.
        Enable OFFSET(0) NUMBITS(1) []
    ],

    /// Interrupt Controller Type Register (GICD_TYPER).
    TYPER [
        /// Encodes the number of implemented interrupt lines as (ITLinesNumber + 1) * 32.
        ITLinesNumber OFFSET(0) NUMBITS(5) []
    ],

    /// Interrupt Processor Targets Registers (GICD_ITARGETSRn).
    ///
    /// Four 8-bit CPU target fields per 32-bit register, one field per IRQ.
    ITARGETSR [
        Offset3 OFFSET(24) NUMBITS(8) [],
        Offset2 OFFSET(16) NUMBITS(8) [],
        Offset1 OFFSET(8) NUMBITS(8) [],
        Offset0 OFFSET(0) NUMBITS(8) []
    ]
}
register_structs! {
    #[allow(non_snake_case)]
    SharedRegisterBlock {
        (0x000 => CTLR: ReadWrite<u32, CTLR::Register>),
        (0x004 => TYPER: ReadOnly<u32, TYPER::Register>),
        (0x008 => _reserved1),
        // GICD_ISENABLER1..31. ISENABLER0 (offset 0x100) is banked per core and therefore lives
        // in `BankedRegisterBlock`. 31 * 4 bytes => the array ends at 0x180, which is what the
        // next offset must be (`register_structs!` enforces offset continuity; the previous
        // `0x108` treated the array as a single register).
        (0x104 => ISENABLER: [ReadWrite<u32>; 31]),
        (0x180 => _reserved2),
        // GICD_ITARGETSR8..255. ITARGETSR0..7 are banked per core. 248 * 4 bytes => ends at
        // 0xC00 (previously mis-stated as 0x824).
        (0x820 => ITARGETSR: [ReadWrite<u32, ITARGETSR::Register>; 248]),
        (0xC00 => @END),
    }
}
register_structs! {
    #[allow(non_snake_case)]
    BankedRegisterBlock {
        (0x000 => _reserved1),
        // GICD_ISENABLER0 — covers the 32 private IRQs, banked per core.
        (0x100 => ISENABLER: ReadWrite<u32>),
        (0x104 => _reserved2),
        // GICD_ITARGETSR0..7 — read-only, banked per core. 8 * 4 bytes => the block ends at
        // 0x820 (previously mis-stated as 0x804, which `register_structs!` would reject).
        (0x800 => ITARGETSR: [ReadOnly<u32, ITARGETSR::Register>; 8]),
        (0x820 => @END),
    }
}
/// Abstraction for the non-banked parts of the associated MMIO registers.
type SharedRegisters = MMIODerefWrapper<SharedRegisterBlock>;
/// Abstraction for the banked parts of the associated MMIO registers.
type BankedRegisters = MMIODerefWrapper<BankedRegisterBlock>;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Representation of the GIC Distributor.
pub struct GICD {
/// Access to shared registers is guarded with a lock.
shared_registers: IRQSafeNullLock<SharedRegisters>,
/// Access to banked registers is unguarded.
banked_registers: InitStateLock<BankedRegisters>,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl SharedRegisters {
    /// Return the number of IRQs that this HW implements.
    #[inline(always)]
    fn num_irqs(&mut self) -> usize {
        // Query number of implemented IRQs.
        //
        // Refer to GICv2 Architecture Specification, Section 4.3.2.
        ((self.TYPER.read(TYPER::ITLinesNumber) as usize) + 1) * 32
    }

    /// Return a slice of the implemented ITARGETSR.
    #[inline(always)]
    fn implemented_itargets_slice(&mut self) -> &[ReadWrite<u32, ITARGETSR::Register>] {
        assert!(self.num_irqs() >= 36);

        // Calculate the max index of the shared ITARGETSR array.
        //
        // The first 32 IRQs are private, so not included in `shared_registers`. Each ITARGETSR
        // register has four entries, so shift right by two. Subtract one because we start
        // counting at zero.
        let spi_itargetsr_max_index = ((self.num_irqs() - 32) >> 2) - 1;

        // Use an inclusive range: `spi_itargetsr_max_index` is the index of the *last* valid
        // register. The previous exclusive upper bound skipped the final implemented ITARGETSR,
        // leaving its four IRQs untargeted.
        &self.ITARGETSR[0..=spi_itargetsr_max_index]
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use crate::synchronization::interface::ReadWriteEx;
use synchronization::interface::Mutex;
impl GICD {
    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide a correct MMIO start address.
    pub const unsafe fn new(mmio_start_addr: usize) -> Self {
        Self {
            shared_registers: IRQSafeNullLock::new(SharedRegisters::new(mmio_start_addr)),
            banked_registers: InitStateLock::new(BankedRegisters::new(mmio_start_addr)),
        }
    }

    /// Switch the driver to a new MMIO start address.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide a correct MMIO start address.
    pub unsafe fn set_mmio(&self, new_mmio_start_addr: usize) {
        self.shared_registers
            .lock(|regs| *regs = SharedRegisters::new(new_mmio_start_addr));
        self.banked_registers
            .write(|regs| *regs = BankedRegisters::new(new_mmio_start_addr));
    }

    /// Use a banked ITARGETSR to retrieve the executing core's GIC target mask.
    ///
    /// Quoting the GICv2 Architecture Specification:
    ///
    ///   "GICD_ITARGETSR0 to GICD_ITARGETSR7 are read-only, and each field returns a value that
    ///    corresponds only to the processor reading the register."
    fn local_gic_target_mask(&self) -> u32 {
        self.banked_registers
            .read(|regs| regs.ITARGETSR[0].read(ITARGETSR::Offset0))
    }

    /// Route all SPIs to the boot core and enable the distributor.
    ///
    /// Asserts that the kernel is still in its init phase.
    pub fn boot_core_init(&self) {
        assert!(
            state::state_manager().is_init(),
            "Only allowed during kernel init phase"
        );

        // Target all SPIs to the boot core only.
        let mask = self.local_gic_target_mask();

        self.shared_registers.lock(|regs| {
            for i in regs.implemented_itargets_slice().iter() {
                // Write the boot core's mask into all four 8-bit target fields of this register.
                i.write(
                    ITARGETSR::Offset3.val(mask)
                        + ITARGETSR::Offset2.val(mask)
                        + ITARGETSR::Offset1.val(mask)
                        + ITARGETSR::Offset0.val(mask),
                );
            }

            regs.CTLR.write(CTLR::Enable::SET);
        });
    }

    /// Enable an interrupt.
    pub fn enable(&self, irq_num: super::IRQNumber) {
        let irq_num = irq_num.get();

        // Each bit in the u32 enable register corresponds to one IRQ number. Shift right by 5
        // (division by 32) and arrive at the index for the respective ISENABLER[i].
        let enable_reg_index = irq_num >> 5;
        let enable_bit: u32 = 1u32 << (irq_num % 32);

        // Check if we are handling a private or shared IRQ.
        match irq_num {
            // Private.
            0..=31 => self.banked_registers.read(|regs| {
                let enable_reg = &regs.ISENABLER;
                enable_reg.set(enable_reg.get() | enable_bit);
            }),
            // Shared.
            _ => {
                // The shared ISENABLER array starts at ISENABLER1, hence the index shift by one.
                let enable_reg_index_shared = enable_reg_index - 1;

                self.shared_registers.lock(|regs| {
                    let enable_reg = &regs.ISENABLER[enable_reg_index_shared];
                    enable_reg.set(enable_reg.get() | enable_bit);
                });
            }
        }
    }
}

@ -0,0 +1,15 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BCM driver top level.
mod bcm2xxx_gpio;
#[cfg(feature = "bsp_rpi3")]
mod bcm2xxx_interrupt_controller;
mod bcm2xxx_pl011_uart;
pub use bcm2xxx_gpio::*;
#[cfg(feature = "bsp_rpi3")]
pub use bcm2xxx_interrupt_controller::*;
pub use bcm2xxx_pl011_uart::*;

@ -0,0 +1,255 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! GPIO Driver.
use crate::{
bsp::device_driver::common::MMIODerefWrapper, driver, memory, synchronization,
synchronization::IRQSafeNullLock,
};
use core::sync::atomic::{AtomicUsize, Ordering};
use register::{mmio::*, register_bitfields, register_structs};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// GPIO registers.
//
// Descriptions taken from
// - https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
// - https://datasheets.raspberrypi.org/bcm2711/bcm2711-peripherals.pdf
register_bitfields! {
u32,
/// GPIO Function Select 1
GPFSEL1 [
/// Pin 15
FSEL15 OFFSET(15) NUMBITS(3) [
Input = 0b000,
Output = 0b001,
AltFunc0 = 0b100 // PL011 UART RX
],
/// Pin 14
FSEL14 OFFSET(12) NUMBITS(3) [
Input = 0b000,
Output = 0b001,
AltFunc0 = 0b100 // PL011 UART TX
]
],
/// GPIO Pull-up/down Register
///
/// BCM2837 only.
GPPUD [
/// Controls the actuation of the internal pull-up/down control line to ALL the GPIO pins.
PUD OFFSET(0) NUMBITS(2) [
Off = 0b00,
PullDown = 0b01,
PullUp = 0b10
]
],
/// GPIO Pull-up/down Clock Register 0
///
/// BCM2837 only.
GPPUDCLK0 [
/// Pin 15
PUDCLK15 OFFSET(15) NUMBITS(1) [
NoEffect = 0,
AssertClock = 1
],
/// Pin 14
PUDCLK14 OFFSET(14) NUMBITS(1) [
NoEffect = 0,
AssertClock = 1
]
],
/// GPIO Pull-up / Pull-down Register 0
///
/// BCM2711 only.
GPIO_PUP_PDN_CNTRL_REG0 [
/// Pin 15
GPIO_PUP_PDN_CNTRL15 OFFSET(30) NUMBITS(2) [
NoResistor = 0b00,
PullUp = 0b01
],
/// Pin 14
GPIO_PUP_PDN_CNTRL14 OFFSET(28) NUMBITS(2) [
NoResistor = 0b00,
PullUp = 0b01
]
]
}
register_structs! {
    #[allow(non_snake_case)]
    RegisterBlock {
        (0x00 => _reserved1),
        // Contains the FSEL14/FSEL15 function-select fields used by this driver.
        (0x04 => GPFSEL1: ReadWrite<u32, GPFSEL1::Register>),
        (0x08 => _reserved2),
        // BCM2837-only pull-up/down control pair (mode + clock strobe).
        (0x94 => GPPUD: ReadWrite<u32, GPPUD::Register>),
        (0x98 => GPPUDCLK0: ReadWrite<u32, GPPUDCLK0::Register>),
        (0x9C => _reserved3),
        // BCM2711-only pull-up/down control.
        (0xE4 => GPIO_PUP_PDN_CNTRL_REG0: ReadWrite<u32, GPIO_PUP_PDN_CNTRL_REG0::Register>),
        (0xE8 => @END),
    }
}
/// Abstraction for the associated MMIO registers.
type Registers = MMIODerefWrapper<RegisterBlock>;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub struct GPIOInner {
registers: Registers,
}
// Export the inner struct so that BSPs can use it for the panic handler.
pub use GPIOInner as PanicGPIO;
/// Representation of the GPIO HW.
pub struct GPIO {
mmio_descriptor: memory::mmu::MMIODescriptor,
virt_mmio_start_addr: AtomicUsize,
inner: IRQSafeNullLock<GPIOInner>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl GPIOInner {
    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide a correct MMIO start address.
    pub const unsafe fn new(mmio_start_addr: usize) -> Self {
        Self {
            registers: Registers::new(mmio_start_addr),
        }
    }

    /// Init code.
    ///
    /// Optionally re-points the register block at `new_mmio_start_addr` first.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide a correct MMIO start address.
    pub unsafe fn init(&mut self, new_mmio_start_addr: Option<usize>) -> Result<(), &'static str> {
        if let Some(addr) = new_mmio_start_addr {
            self.registers = Registers::new(addr);
        }

        Ok(())
    }

    /// Disable pull-up/down on pins 14 and 15.
    #[cfg(feature = "bsp_rpi3")]
    fn disable_pud_14_15_bcm2837(&mut self) {
        use crate::{time, time::interface::TimeManager};
        use core::time::Duration;

        // The Linux 2837 GPIO driver waits 1 µs between the steps.
        const DELAY: Duration = Duration::from_micros(1);

        // Sequence: program the desired PUD mode, wait, strobe the clock for pins 14/15, wait,
        // then de-assert both registers.
        self.registers.GPPUD.write(GPPUD::PUD::Off);
        time::time_manager().spin_for(DELAY);

        self.registers
            .GPPUDCLK0
            .write(GPPUDCLK0::PUDCLK15::AssertClock + GPPUDCLK0::PUDCLK14::AssertClock);
        time::time_manager().spin_for(DELAY);

        self.registers.GPPUD.write(GPPUD::PUD::Off);
        self.registers.GPPUDCLK0.set(0);
    }

    /// Disable pull-up/down on pins 14 and 15.
    #[cfg(feature = "bsp_rpi4")]
    fn disable_pud_14_15_bcm2711(&mut self) {
        // NOTE(review): this writes `PullUp` although the function name says "disable
        // pull-up/down" (`NoResistor`). Presumably intentional for a UART's idle-high lines —
        // confirm before changing.
        self.registers.GPIO_PUP_PDN_CNTRL_REG0.write(
            GPIO_PUP_PDN_CNTRL_REG0::GPIO_PUP_PDN_CNTRL15::PullUp
                + GPIO_PUP_PDN_CNTRL_REG0::GPIO_PUP_PDN_CNTRL14::PullUp,
        );
    }

    /// Map PL011 UART as standard output.
    ///
    /// TX to pin 14
    /// RX to pin 15
    pub fn map_pl011_uart(&mut self) {
        // Select the UART on pins 14 and 15.
        self.registers
            .GPFSEL1
            .modify(GPFSEL1::FSEL15::AltFunc0 + GPFSEL1::FSEL14::AltFunc0);

        // Disable pull-up/down on pins 14 and 15.
        #[cfg(feature = "bsp_rpi3")]
        self.disable_pud_14_15_bcm2837();

        #[cfg(feature = "bsp_rpi4")]
        self.disable_pud_14_15_bcm2711();
    }
}
impl GPIO {
    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide correct MMIO descriptors.
    pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
        Self {
            mmio_descriptor,
            // 0 encodes "not MMIO-mapped yet"; set by `init()`.
            virt_mmio_start_addr: AtomicUsize::new(0),
            inner: IRQSafeNullLock::new(GPIOInner::new(mmio_descriptor.start_addr().into_usize())),
        }
    }

    /// Concurrency safe version of `GPIOInner.map_pl011_uart()`
    pub fn map_pl011_uart(&self) {
        self.inner.lock(|inner| inner.map_pl011_uart())
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::Mutex;
impl driver::interface::DeviceDriver for GPIO {
    fn compatible(&self) -> &'static str {
        "BCM GPIO"
    }

    unsafe fn init(&self) -> Result<(), &'static str> {
        // Map the MMIO region into the kernel's address space, then re-point the inner driver
        // at the returned virtual address.
        let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;

        self.inner
            .lock(|inner| inner.init(Some(virt_addr.into_usize())))?;

        // Remember the virtual address for `virt_mmio_start_addr()`.
        self.virt_mmio_start_addr
            .store(virt_addr.into_usize(), Ordering::Relaxed);

        Ok(())
    }

    fn virt_mmio_start_addr(&self) -> Option<usize> {
        // 0 means `init()` has not (successfully) run yet.
        let addr = self.virt_mmio_start_addr.load(Ordering::Relaxed);
        if addr == 0 {
            return None;
        }

        Some(addr)
    }
}

@ -0,0 +1,138 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Interrupt Controller Driver.
mod peripheral_ic;
use crate::{driver, exception, memory};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Wrapper struct for a bitmask indicating pending IRQ numbers.
struct PendingIRQs {
bitmask: u64,
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub type LocalIRQ =
exception::asynchronous::IRQNumber<{ InterruptController::MAX_LOCAL_IRQ_NUMBER }>;
pub type PeripheralIRQ =
exception::asynchronous::IRQNumber<{ InterruptController::MAX_PERIPHERAL_IRQ_NUMBER }>;
/// Used for the associated type of trait [`exception::asynchronous::interface::IRQManager`].
#[derive(Copy, Clone)]
pub enum IRQNumber {
Local(LocalIRQ),
Peripheral(PeripheralIRQ),
}
/// Representation of the Interrupt Controller.
pub struct InterruptController {
periph: peripheral_ic::PeripheralIC,
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl PendingIRQs {
    /// Wrap a raw bitmask of pending peripheral IRQs (bit `i` set == IRQ `i` pending).
    pub fn new(bitmask: u64) -> Self {
        Self { bitmask }
    }
}
impl Iterator for PendingIRQs {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
use core::intrinsics::cttz;
let next = cttz(self.bitmask);
if next == 64 {
return None;
}
self.bitmask &= !(1 << next);
Some(next as usize)
}
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl InterruptController {
    // Highest local / peripheral IRQ numbers this BSP knows about.
    const MAX_LOCAL_IRQ_NUMBER: usize = 11;
    const MAX_PERIPHERAL_IRQ_NUMBER: usize = 63;
    const NUM_PERIPHERAL_IRQS: usize = Self::MAX_PERIPHERAL_IRQ_NUMBER + 1;

    /// Create an instance.
    ///
    /// The local IC is not implemented yet (see the `unimplemented!` arms in the `IRQManager`
    /// impl), so its MMIO descriptor is currently unused.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide correct MMIO descriptors.
    pub const unsafe fn new(
        _local_mmio_descriptor: memory::mmu::MMIODescriptor,
        periph_mmio_descriptor: memory::mmu::MMIODescriptor,
    ) -> Self {
        Self {
            periph: peripheral_ic::PeripheralIC::new(periph_mmio_descriptor),
        }
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
impl driver::interface::DeviceDriver for InterruptController {
    fn compatible(&self) -> &'static str {
        "BCM Interrupt Controller"
    }

    unsafe fn init(&self) -> Result<(), &'static str> {
        // Only the peripheral part needs runtime init.
        self.periph.init()
    }
}
impl exception::asynchronous::interface::IRQManager for InterruptController {
    type IRQNumberType = IRQNumber;

    /// Register a handler. Only peripheral IRQs are supported so far.
    fn register_handler(
        &self,
        irq: Self::IRQNumberType,
        descriptor: exception::asynchronous::IRQDescriptor,
    ) -> Result<(), &'static str> {
        match irq {
            IRQNumber::Local(_) => unimplemented!("Local IRQ controller not implemented."),
            IRQNumber::Peripheral(pirq) => self.periph.register_handler(pirq, descriptor),
        }
    }

    /// Enable an IRQ. Only peripheral IRQs are supported so far.
    fn enable(&self, irq: Self::IRQNumberType) {
        match irq {
            IRQNumber::Local(_) => unimplemented!("Local IRQ controller not implemented."),
            IRQNumber::Peripheral(pirq) => self.periph.enable(pirq),
        }
    }

    fn handle_pending_irqs<'irq_context>(
        &'irq_context self,
        ic: &exception::asynchronous::IRQContext<'irq_context>,
    ) {
        // It can only be a peripheral IRQ pending because enable() does not support local IRQs yet.
        self.periph.handle_pending_irqs(ic)
    }

    fn print_handler(&self) {
        self.periph.print_handler();
    }
}

@ -0,0 +1,188 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Peripheral Interrupt Controller Driver.
use super::{InterruptController, PendingIRQs, PeripheralIRQ};
use crate::{
bsp::device_driver::common::MMIODerefWrapper,
driver, exception, memory, synchronization,
synchronization::{IRQSafeNullLock, InitStateLock},
};
use register::{mmio::*, register_structs};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
register_structs! {
    #[allow(non_snake_case)]
    WORegisterBlock {
        (0x00 => _reserved1),
        // Set-to-enable registers for peripheral IRQs 0..31 and 32..63.
        (0x10 => ENABLE_1: WriteOnly<u32>),
        (0x14 => ENABLE_2: WriteOnly<u32>),
        // ENABLE_2 ends at 0x18. The previous `(0x24 => @END)` left a gap with no padding
        // field, which `register_structs!` rejects (it enforces offset continuity).
        (0x18 => @END),
    }
}
register_structs! {
    #[allow(non_snake_case)]
    RORegisterBlock {
        (0x00 => _reserved1),
        // Pending state for peripheral IRQs 0..31 and 32..63; combined into a 64-bit mask in
        // `PeripheralIC::pending_irqs()`.
        (0x04 => PENDING_1: ReadOnly<u32>),
        (0x08 => PENDING_2: ReadOnly<u32>),
        (0x0c => @END),
    }
}
/// Abstraction for the WriteOnly parts of the associated MMIO registers.
type WriteOnlyRegisters = MMIODerefWrapper<WORegisterBlock>;
/// Abstraction for the ReadOnly parts of the associated MMIO registers.
type ReadOnlyRegisters = MMIODerefWrapper<RORegisterBlock>;
type HandlerTable =
[Option<exception::asynchronous::IRQDescriptor>; InterruptController::NUM_PERIPHERAL_IRQS];
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Representation of the peripheral interrupt controller.
pub struct PeripheralIC {
mmio_descriptor: memory::mmu::MMIODescriptor,
/// Access to write registers is guarded with a lock.
wo_registers: IRQSafeNullLock<WriteOnlyRegisters>,
/// Register read access is unguarded.
ro_registers: InitStateLock<ReadOnlyRegisters>,
/// Stores registered IRQ handlers. Writable only during kernel init. RO afterwards.
handler_table: InitStateLock<HandlerTable>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl PeripheralIC {
    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide correct MMIO descriptors.
    pub const unsafe fn new(mmio_descriptor: memory::mmu::MMIODescriptor) -> Self {
        let addr = mmio_descriptor.start_addr().into_usize();

        Self {
            mmio_descriptor,
            wo_registers: IRQSafeNullLock::new(WriteOnlyRegisters::new(addr)),
            ro_registers: InitStateLock::new(ReadOnlyRegisters::new(addr)),
            handler_table: InitStateLock::new([None; InterruptController::NUM_PERIPHERAL_IRQS]),
        }
    }

    /// Query the list of pending IRQs.
    fn pending_irqs(&self) -> PendingIRQs {
        self.ro_registers.read(|regs| {
            // Combine both 32-bit pending registers into one 64-bit mask; PENDING_2 holds
            // IRQs 32..63 and therefore occupies the upper half.
            let pending_mask: u64 =
                (u64::from(regs.PENDING_2.get()) << 32) | u64::from(regs.PENDING_1.get());

            PendingIRQs::new(pending_mask)
        })
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::{Mutex, ReadWriteEx};
impl driver::interface::DeviceDriver for PeripheralIC {
    fn compatible(&self) -> &'static str {
        "BCM Peripheral Interrupt Controller"
    }

    unsafe fn init(&self) -> Result<(), &'static str> {
        // Map the MMIO region and re-point both register wrappers at the virtual address.
        let virt_addr =
            memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?.into_usize();

        self.wo_registers
            .lock(|regs| *regs = WriteOnlyRegisters::new(virt_addr));
        self.ro_registers
            .write(|regs| *regs = ReadOnlyRegisters::new(virt_addr));

        Ok(())
    }
}
impl exception::asynchronous::interface::IRQManager for PeripheralIC {
    type IRQNumberType = PeripheralIRQ;

    /// Register a handler for `irq`. Fails if one is already registered.
    fn register_handler(
        &self,
        irq: Self::IRQNumberType,
        descriptor: exception::asynchronous::IRQDescriptor,
    ) -> Result<(), &'static str> {
        // Per the field doc on `handler_table`, writes only happen during kernel init.
        self.handler_table.write(|table| {
            let irq_number = irq.get();

            if table[irq_number].is_some() {
                return Err("IRQ handler already registered");
            }

            table[irq_number] = Some(descriptor);

            Ok(())
        })
    }

    fn enable(&self, irq: Self::IRQNumberType) {
        self.wo_registers.lock(|regs| {
            // ENABLE_1 covers IRQs 0..31, ENABLE_2 covers 32..63.
            let enable_reg = if irq.get() <= 31 {
                &regs.ENABLE_1
            } else {
                &regs.ENABLE_2
            };

            let enable_bit: u32 = 1 << (irq.get() % 32);

            // Writing a 1 to a bit will set the corresponding IRQ enable bit. All other IRQ enable
            // bits are unaffected. So we don't need read and OR'ing here.
            enable_reg.set(enable_bit);
        });
    }

    /// Dispatch every currently pending peripheral IRQ to its registered handler.
    ///
    /// Panics if an IRQ is pending without a registered handler.
    fn handle_pending_irqs<'irq_context>(
        &'irq_context self,
        _ic: &exception::asynchronous::IRQContext<'irq_context>,
    ) {
        self.handler_table.read(|table| {
            for irq_number in self.pending_irqs() {
                match table[irq_number] {
                    None => panic!("No handler registered for IRQ {}", irq_number),
                    Some(descriptor) => {
                        // Call the IRQ handler. Panics on failure.
                        descriptor.handler.handle().expect("Error handling IRQ");
                    }
                }
            }
        })
    }

    fn print_handler(&self) {
        use crate::info;

        info!("      Peripheral handler:");

        self.handler_table.read(|table| {
            for (i, opt) in table.iter().enumerate() {
                if let Some(handler) = opt {
                    info!("            {: >3}. {}", i, handler.name);
                }
            }
        });
    }
}

@ -0,0 +1,537 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! PL011 UART driver.
//!
//! # Resources
//!
//! - <https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf>
//! - <https://developer.arm.com/documentation/ddi0183/latest>
use crate::{
bsp, bsp::device_driver::common::MMIODerefWrapper, console, cpu, driver, exception, memory,
synchronization, synchronization::IRQSafeNullLock,
};
use core::{
fmt,
sync::atomic::{AtomicUsize, Ordering},
};
use register::{mmio::*, register_bitfields, register_structs};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// PL011 UART registers.
//
// Descriptions taken from "PrimeCell UART (PL011) Technical Reference Manual" r1p5.
register_bitfields! {
    u32,

    /// Flag Register.
    FR [
        /// Transmit FIFO empty. The meaning of this bit depends on the state of the FEN bit in the
        /// Line Control Register, LCR_H.
        ///
        /// - If the FIFO is disabled, this bit is set when the transmit holding register is empty.
        /// - If the FIFO is enabled, the TXFE bit is set when the transmit FIFO is empty.
        /// - This bit does not indicate if there is data in the transmit shift register.
        TXFE OFFSET(7) NUMBITS(1) [],

        /// Transmit FIFO full. The meaning of this bit depends on the state of the FEN bit in the
        /// LCR_H Register.
        ///
        /// - If the FIFO is disabled, this bit is set when the transmit holding register is full.
        /// - If the FIFO is enabled, the TXFF bit is set when the transmit FIFO is full.
        TXFF OFFSET(5) NUMBITS(1) [],

        /// Receive FIFO empty. The meaning of this bit depends on the state of the FEN bit in the
        /// LCR_H Register.
        ///
        /// - If the FIFO is disabled, this bit is set when the receive holding register is empty.
        /// - If the FIFO is enabled, the RXFE bit is set when the receive FIFO is empty.
        RXFE OFFSET(4) NUMBITS(1) [],

        /// UART busy. If this bit is set to 1, the UART is busy transmitting data. This bit remains
        /// set until the complete byte, including all the stop bits, has been sent from the shift
        /// register.
        ///
        /// This bit is set as soon as the transmit FIFO becomes non-empty, regardless of whether
        /// the UART is enabled or not.
        BUSY OFFSET(3) NUMBITS(1) []
    ],

    /// Integer Baud Rate Divisor.
    IBRD [
        /// The integer baud rate divisor.
        BAUD_DIVINT OFFSET(0) NUMBITS(16) []
    ],

    /// Fractional Baud Rate Divisor.
    FBRD [
        /// The fractional baud rate divisor.
        BAUD_DIVFRAC OFFSET(0) NUMBITS(6) []
    ],

    /// Line Control Register.
    LCR_H [
        /// Word length. These bits indicate the number of data bits transmitted or received in a
        /// frame.
        WLEN OFFSET(5) NUMBITS(2) [
            FiveBit = 0b00,
            SixBit = 0b01,
            SevenBit = 0b10,
            EightBit = 0b11
        ],

        /// Enable FIFOs:
        ///
        /// 0 = FIFOs are disabled (character mode) that is, the FIFOs become 1-byte-deep holding
        /// registers.
        ///
        /// 1 = Transmit and receive FIFO buffers are enabled (FIFO mode).
        FEN OFFSET(4) NUMBITS(1) [
            FifosDisabled = 0,
            FifosEnabled = 1
        ]
    ],

    /// Control Register.
    CR [
        /// Receive enable. If this bit is set to 1, the receive section of the UART is enabled.
        /// Data reception occurs for either UART signals or SIR signals depending on the setting of
        /// the SIREN bit. When the UART is disabled in the middle of reception, it completes the
        /// current character before stopping.
        RXE OFFSET(9) NUMBITS(1) [
            Disabled = 0,
            Enabled = 1
        ],

        /// Transmit enable. If this bit is set to 1, the transmit section of the UART is enabled.
        /// Data transmission occurs for either UART signals, or SIR signals depending on the
        /// setting of the SIREN bit. When the UART is disabled in the middle of transmission, it
        /// completes the current character before stopping.
        TXE OFFSET(8) NUMBITS(1) [
            Disabled = 0,
            Enabled = 1
        ],

        /// UART enable:
        ///
        /// 0 = UART is disabled. If the UART is disabled in the middle of transmission or
        /// reception, it completes the current character before stopping.
        ///
        /// 1 = The UART is enabled. Data transmission and reception occurs for either UART signals
        /// or SIR signals depending on the setting of the SIREN bit
        UARTEN OFFSET(0) NUMBITS(1) [
            /// If the UART is disabled in the middle of transmission or reception, it completes the
            /// current character before stopping.
            Disabled = 0,
            Enabled = 1
        ]
    ],

    /// Interrupt FIFO Level Select Register.
    IFLS [
        /// Receive interrupt FIFO level select. The trigger points for the receive interrupt are as
        /// follows.
        ///
        /// Per the PL011 TRM, RXIFLSEL is a 3-bit field occupying bits [5:3]; the previous
        /// NUMBITS(5) definition would have overlapped the neighboring TXIFLSEL bits on a
        /// field-level read-modify-write.
        RXIFLSEL OFFSET(3) NUMBITS(3) [
            OneEigth = 0b000,
            OneQuarter = 0b001,
            OneHalf = 0b010,
            ThreeQuarters = 0b011,
            SevenEights = 0b100
        ]
    ],

    /// Interrupt Mask Set/Clear Register.
    IMSC [
        /// Receive timeout interrupt mask. A read returns the current mask for the UARTRTINTR
        /// interrupt.
        ///
        /// - On a write of 1, the mask of the UARTRTINTR interrupt is set.
        /// - A write of 0 clears the mask.
        RTIM OFFSET(6) NUMBITS(1) [
            Disabled = 0,
            Enabled = 1
        ],

        /// Receive interrupt mask. A read returns the current mask for the UARTRXINTR interrupt.
        ///
        /// - On a write of 1, the mask of the UARTRXINTR interrupt is set.
        /// - A write of 0 clears the mask.
        RXIM OFFSET(4) NUMBITS(1) [
            Disabled = 0,
            Enabled = 1
        ]
    ],

    /// Masked Interrupt Status Register.
    MIS [
        /// Receive timeout masked interrupt status. Returns the masked interrupt state of the
        /// UARTRTINTR interrupt.
        RTMIS OFFSET(6) NUMBITS(1) [],

        /// Receive masked interrupt status. Returns the masked interrupt state of the UARTRXINTR
        /// interrupt.
        RXMIS OFFSET(4) NUMBITS(1) []
    ],

    /// Interrupt Clear Register.
    ICR [
        /// Meta field for all pending interrupts.
        ALL OFFSET(0) NUMBITS(11) []
    ]
}
register_structs! {
    // Offsets per the PL011 Technical Reference Manual register summary.
    #[allow(non_snake_case)]
    pub RegisterBlock {
        (0x00 => DR: ReadWrite<u32>),
        (0x04 => _reserved1),
        (0x18 => FR: ReadOnly<u32, FR::Register>),
        (0x1c => _reserved2),
        (0x24 => IBRD: WriteOnly<u32, IBRD::Register>),
        (0x28 => FBRD: WriteOnly<u32, FBRD::Register>),
        (0x2c => LCR_H: WriteOnly<u32, LCR_H::Register>),
        (0x30 => CR: WriteOnly<u32, CR::Register>),
        (0x34 => IFLS: ReadWrite<u32, IFLS::Register>),
        (0x38 => IMSC: ReadWrite<u32, IMSC::Register>),
        (0x3C => _reserved3),
        (0x40 => MIS: ReadOnly<u32, MIS::Register>),
        (0x44 => ICR: WriteOnly<u32, ICR::Register>),
        (0x48 => @END),
    }
}
/// Abstraction for the associated MMIO registers.
type Registers = MMIODerefWrapper<RegisterBlock>;
#[derive(PartialEq)]
enum BlockingMode {
Blocking,
NonBlocking,
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
pub struct PL011UartInner {
registers: Registers,
chars_written: usize,
chars_read: usize,
}
// Export the inner struct so that BSPs can use it for the panic handler.
pub use PL011UartInner as PanicUart;
/// Representation of the UART.
pub struct PL011Uart {
mmio_descriptor: memory::mmu::MMIODescriptor,
virt_mmio_start_addr: AtomicUsize,
inner: IRQSafeNullLock<PL011UartInner>,
irq_number: bsp::device_driver::IRQNumber,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl PL011UartInner {
/// Create an instance.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
pub const unsafe fn new(mmio_start_addr: usize) -> Self {
Self {
registers: Registers::new(mmio_start_addr),
chars_written: 0,
chars_read: 0,
}
}
/// Set up baud rate and characteristics.
///
/// This results in 8N1 and 921_600 baud.
///
/// The calculation for the BRD is (we set the clock to 48 MHz in config.txt):
/// `(48_000_000 / 16) / 921_600 = 3.2552083`.
///
/// This means the integer part is `3` and goes into the `IBRD`.
/// The fractional part is `0.2552083`.
///
/// `FBRD` calculation according to the PL011 Technical Reference Manual:
/// `INTEGER((0.2552083 * 64) + 0.5) = 16`.
///
/// Therefore, the generated baud rate divider is: `3 + 16/64 = 3.25`. Which results in a
/// genrated baud rate of `48_000_000 / (16 * 3.25) = 923_077`.
///
/// Error = `((923_077 - 921_600) / 921_600) * 100 = 0.16%`.
///
/// # Safety
///
/// - The user must ensure to provide a correct MMIO start address.
    /// `new_mmio_start_addr`: when `Some`, the register block is re-based to this address before
    /// the hardware is (re-)programmed; `None` keeps the current MMIO base.
    pub unsafe fn init(&mut self, new_mmio_start_addr: Option<usize>) -> Result<(), &'static str> {
        if let Some(addr) = new_mmio_start_addr {
            // Re-point the register block at the (possibly remapped) MMIO address.
            self.registers = Registers::new(addr);
        }
        // Execution can arrive here while there are still characters queued in the TX FIFO and
        // actively being sent out by the UART hardware. If the UART is turned off in this case,
        // those queued characters would be lost.
        //
        // For example, this can happen during runtime on a call to panic!(), because panic!()
        // initializes its own UART instance and calls init().
        //
        // Hence, flush first to ensure all pending characters are transmitted.
        self.flush();
        // Turn the UART off temporarily.
        self.registers.CR.set(0);
        // Clear all pending interrupts.
        self.registers.ICR.write(ICR::ALL::CLEAR);
        // From the PL011 Technical Reference Manual:
        //
        // The LCR_H, IBRD, and FBRD registers form the single 30-bit wide LCR Register that is
        // updated on a single write strobe generated by a LCR_H write. So, to internally update the
        // contents of IBRD or FBRD, a LCR_H write must always be performed at the end.
        //
        // Set the baud rate, 8N1 and FIFO enabled.
        self.registers.IBRD.write(IBRD::BAUD_DIVINT.val(3));
        self.registers.FBRD.write(FBRD::BAUD_DIVFRAC.val(16));
        self.registers
            .LCR_H
            .write(LCR_H::WLEN::EightBit + LCR_H::FEN::FifosEnabled);
        // Set RX FIFO fill level at 1/8. (`OneEigth` [sic] is the field name as declared in the
        // register definition.)
        self.registers.IFLS.write(IFLS::RXIFLSEL::OneEigth);
        // Enable RX IRQ + RX timeout IRQ.
        self.registers
            .IMSC
            .write(IMSC::RXIM::Enabled + IMSC::RTIM::Enabled);
        // Turn the UART on.
        self.registers
            .CR
            .write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
        Ok(())
    }
    /// Send a character.
    ///
    /// Spins until there is room in the TX FIFO, then enqueues `c` and updates the write
    /// statistics.
    fn write_char(&mut self, c: char) {
        // Spin while TX FIFO full is set, waiting for an empty slot.
        while self.registers.FR.matches_all(FR::TXFF::SET) {
            cpu::nop();
        }
        // Write the character to the buffer.
        // NOTE(review): `DR` is written with the full `u32`; chars beyond 8 bits presumably get
        // truncated by the hardware's data register — confirm for non-ASCII input.
        self.registers.DR.set(c as u32);
        self.chars_written += 1;
    }
    /// Block execution until the last buffered character has been physically put on the TX wire.
    ///
    /// Busy-waits on the flag register; safe to call from a panic path since it takes no locks.
    fn flush(&self) {
        // Spin until the busy bit is cleared.
        while self.registers.FR.matches_all(FR::BUSY::SET) {
            cpu::nop();
        }
    }
    /// Retrieve a character.
    ///
    /// In `NonBlocking` mode, returns `None` when the RX FIFO is empty; in `Blocking` mode, spins
    /// until a character arrives. Carriage returns are converted to newlines, and the read
    /// statistics are updated on success.
    fn read_char_converting(&mut self, blocking_mode: BlockingMode) -> Option<char> {
        // If RX FIFO is empty,
        if self.registers.FR.matches_all(FR::RXFE::SET) {
            // immediately return in non-blocking mode.
            if blocking_mode == BlockingMode::NonBlocking {
                return None;
            }
            // Otherwise, wait until a char was received.
            while self.registers.FR.matches_all(FR::RXFE::SET) {
                cpu::nop();
            }
        }
        // Read one character. Only the low 8 bits of the data register are kept.
        let mut ret = self.registers.DR.get() as u8 as char;
        // Convert carriage return to newline.
        if ret == '\r' {
            ret = '\n'
        }
        // Update statistics.
        self.chars_read += 1;
        Some(ret)
    }
}
/// Implementing `core::fmt::Write` enables usage of the `format_args!` macros, which in turn are
/// used to implement the `kernel`'s `print!` and `println!` macros. By implementing `write_str()`,
/// we get `write_fmt()` automatically.
///
/// The function takes an `&mut self`, so it must be implemented for the inner struct.
///
/// See [`src/print.rs`].
///
/// [`src/print.rs`]: ../../print/index.html
impl fmt::Write for PL011UartInner {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        // Forward every character of the string to the (blocking) hardware write.
        s.chars().for_each(|c| self.write_char(c));

        Ok(())
    }
}
impl PL011Uart {
    /// Create an instance.
    ///
    /// The stored `virt_mmio_start_addr` starts at `0`, meaning "not yet remapped"; it is updated
    /// by `DeviceDriver::init()` once the MMIO region has been mapped into the kernel's address
    /// space.
    ///
    /// # Safety
    ///
    /// - The user must ensure to provide correct MMIO descriptors.
    /// - The user must ensure to provide correct IRQ numbers.
    pub const unsafe fn new(
        mmio_descriptor: memory::mmu::MMIODescriptor,
        irq_number: bsp::device_driver::IRQNumber,
    ) -> Self {
        Self {
            mmio_descriptor,
            virt_mmio_start_addr: AtomicUsize::new(0),
            // Until remapping happens, the inner driver points at the physical MMIO address.
            inner: IRQSafeNullLock::new(PL011UartInner::new(
                mmio_descriptor.start_addr().into_usize(),
            )),
            irq_number,
        }
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use synchronization::interface::Mutex;
impl driver::interface::DeviceDriver for PL011Uart {
    fn compatible(&self) -> &'static str {
        "BCM PL011 UART"
    }
    /// Map the device's MMIO region into the kernel's address space, then initialize the hardware
    /// through the remapped virtual address.
    unsafe fn init(&self) -> Result<(), &'static str> {
        let virt_addr = memory::mmu::kernel_map_mmio(self.compatible(), &self.mmio_descriptor)?;
        self.inner
            .lock(|inner| inner.init(Some(virt_addr.into_usize())))?;
        // Publish the virtual address so `virt_mmio_start_addr()` (used e.g. by the panic
        // console) can find the remapped registers. `0` continues to mean "not initialized".
        self.virt_mmio_start_addr
            .store(virt_addr.into_usize(), Ordering::Relaxed);
        Ok(())
    }
    /// Register this driver as the handler for its IRQ with the BSP's IRQ manager, then unmask
    /// the IRQ.
    fn register_and_enable_irq_handler(&'static self) -> Result<(), &'static str> {
        use bsp::exception::asynchronous::irq_manager;
        use exception::asynchronous::{interface::IRQManager, IRQDescriptor};
        let descriptor = IRQDescriptor {
            name: "BCM PL011 UART",
            handler: self,
        };
        irq_manager().register_handler(self.irq_number, descriptor)?;
        irq_manager().enable(self.irq_number);
        Ok(())
    }
    /// Return the virtual MMIO start address, or `None` if `init()` has not yet stored one.
    fn virt_mmio_start_addr(&self) -> Option<usize> {
        let addr = self.virt_mmio_start_addr.load(Ordering::Relaxed);
        // `0` is the sentinel written by `new()` for "not initialized".
        if addr == 0 {
            return None;
        }
        Some(addr)
    }
}
impl console::interface::Write for PL011Uart {
    /// Write a single character, guarded by a Mutex to serialize access.
    fn write_char(&self, c: char) {
        self.inner.lock(|inner| inner.write_char(c));
    }
    /// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
    /// serialize access.
    fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
        // Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
        // readability.
        self.inner.lock(|inner| fmt::Write::write_fmt(inner, args))
    }
    fn flush(&self) {
        // Delegate to the inner flush, which spins until the UART's BUSY flag is cleared.
        self.inner.lock(|inner| inner.flush());
    }
}
impl console::interface::Read for PL011Uart {
    /// Block until a character is received, with CR-to-LF conversion applied.
    fn read_char(&self) -> char {
        let c = self
            .inner
            .lock(|inner| inner.read_char_converting(BlockingMode::Blocking));

        // Blocking mode only returns once a character is available, so this cannot be `None`.
        c.unwrap()
    }

    /// Drain the RX FIFO by reading non-blockingly until it reports empty.
    fn clear_rx(&self) {
        loop {
            let char_read = self
                .inner
                .lock(|inner| inner.read_char_converting(BlockingMode::NonBlocking));

            if char_read.is_none() {
                break;
            }
        }
    }
}
impl console::interface::Statistics for PL011Uart {
    /// Number of characters written so far; read under the lock.
    fn chars_written(&self) -> usize {
        self.inner.lock(|data| data.chars_written)
    }

    /// Number of characters read so far; read under the lock.
    fn chars_read(&self) -> usize {
        self.inner.lock(|data| data.chars_read)
    }
}
impl exception::asynchronous::interface::IRQHandler for PL011Uart {
    /// Handle a pending UART IRQ: acknowledge it, then echo back any received characters.
    fn handle(&self) -> Result<(), &'static str> {
        self.inner.lock(|inner| {
            // Snapshot the masked interrupt status before acknowledging, so the cause is not
            // lost by the clear below.
            let pending = inner.registers.MIS.extract();
            // Clear all pending IRQs.
            inner.registers.ICR.write(ICR::ALL::CLEAR);
            // Check for any kind of RX interrupt (FIFO threshold or receive timeout).
            if pending.matches_any(MIS::RXMIS::SET + MIS::RTMIS::SET) {
                // Echo any received characters.
                while let Some(c) = inner.read_char_converting(BlockingMode::NonBlocking) {
                    inner.write_char(c)
                }
            }
        });
        Ok(())
    }
}

@ -0,0 +1,38 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Common device driver code.
use core::{marker::PhantomData, ops};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Zero-cost wrapper that derefs to a register block of type `T` located at a fixed MMIO start
/// address (see the `Deref` impl below).
pub struct MMIODerefWrapper<T> {
    // Base address of the memory-mapped registers.
    start_addr: usize,
    // Marker for the register block type `T`; no `T` is actually stored.
    phantom: PhantomData<fn() -> T>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<T> MMIODerefWrapper<T> {
    /// Create an instance.
    ///
    /// # Safety
    ///
    /// - The user must ensure that `start_addr` points to a valid, live MMIO register block of
    ///   type `T`; every later `Deref` trusts this address as-is.
    pub const unsafe fn new(start_addr: usize) -> Self {
        Self {
            start_addr,
            phantom: PhantomData,
        }
    }
}
impl<T> ops::Deref for MMIODerefWrapper<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // Reinterpret the raw MMIO start address as a reference to the register block. Soundness
        // rests on the `# Safety` contract of `new()`.
        unsafe { &*(self.start_addr as *const _) }
    }
}

@ -0,0 +1,62 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Top-level BSP file for the Raspberry Pi 3 and 4.
pub mod console;
pub mod cpu;
pub mod driver;
pub mod exception;
pub mod memory;
use super::device_driver;
use crate::memory::mmu::MMIODescriptor;
use memory::map::mmio;
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
// The GPIO driver; also used to route the UART pins (see `map_pl011_uart()` callers).
static GPIO: device_driver::GPIO =
    unsafe { device_driver::GPIO::new(MMIODescriptor::new(mmio::GPIO_START, mmio::GPIO_SIZE)) };
// The PL011 UART, which serves as the system console.
static PL011_UART: device_driver::PL011Uart = unsafe {
    device_driver::PL011Uart::new(
        MMIODescriptor::new(mmio::PL011_UART_START, mmio::PL011_UART_SIZE),
        exception::asynchronous::irq_map::PL011_UART,
    )
};
// Raspberry Pi 3: Broadcom interrupt controller, made up of a local and a peripheral part.
#[cfg(feature = "bsp_rpi3")]
static INTERRUPT_CONTROLLER: device_driver::InterruptController = unsafe {
    device_driver::InterruptController::new(
        MMIODescriptor::new(mmio::LOCAL_IC_START, mmio::LOCAL_IC_SIZE),
        MMIODescriptor::new(mmio::PERIPHERAL_IC_START, mmio::PERIPHERAL_IC_SIZE),
    )
};
// Raspberry Pi 4: ARM GICv2, made up of the distributor (GICD) and CPU interface (GICC).
#[cfg(feature = "bsp_rpi4")]
static INTERRUPT_CONTROLLER: device_driver::GICv2 = unsafe {
    device_driver::GICv2::new(
        MMIODescriptor::new(mmio::GICD_START, mmio::GICD_SIZE),
        MMIODescriptor::new(mmio::GICC_START, mmio::GICC_SIZE),
    )
};
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Board identification.
///
/// Relies on exactly one of the `bsp_rpi3`/`bsp_rpi4` features being enabled, so that exactly
/// one of the cfg'd blocks remains as the function's tail expression.
pub fn board_name() -> &'static str {
    #[cfg(feature = "bsp_rpi3")]
    {
        "Raspberry Pi 3"
    }
    #[cfg(feature = "bsp_rpi4")]
    {
        "Raspberry Pi 4"
    }
}

@ -0,0 +1,88 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP console facilities.
use super::memory;
use crate::{bsp::device_driver, console, cpu, driver};
use core::fmt;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// In case of a panic, the panic handler uses this function to take a last shot at printing
/// something before the system is halted.
///
/// We try to init panic-versions of the GPIO and the UART. The panic versions are not protected
/// with synchronization primitives, which increases chances that we get to print something, even
/// when the kernel's default GPIO or UART instances happen to be locked at the time of the panic.
///
/// # Safety
///
/// - Use only for printing during a panic.
#[cfg(not(feature = "test_build"))]
pub unsafe fn panic_console_out() -> impl fmt::Write {
    use driver::interface::DeviceDriver;
    // Unsynchronized panic instances, constructed at the physical MMIO addresses.
    let mut panic_gpio = device_driver::PanicGPIO::new(memory::map::mmio::GPIO_START.into_usize());
    let mut panic_uart =
        device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
    // If remapping of the driver's MMIO already happened, take the remapped start address.
    // Otherwise, take a chance with the default physical address.
    let maybe_gpio_mmio_start_addr = super::GPIO.virt_mmio_start_addr();
    let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();
    // If even the panic console cannot be brought up, there is nothing left to report with, so
    // park the core.
    panic_gpio
        .init(maybe_gpio_mmio_start_addr)
        .unwrap_or_else(|_| cpu::wait_forever());
    panic_gpio.map_pl011_uart();
    panic_uart
        .init(maybe_uart_mmio_start_addr)
        .unwrap_or_else(|_| cpu::wait_forever());
    panic_uart
}
/// Reduced version for test builds.
///
/// Skips the GPIO pin setup of the non-test variant (presumably not needed under QEMU) and exits
/// QEMU with a failure code instead of spinning when initialization fails.
#[cfg(feature = "test_build")]
pub unsafe fn panic_console_out() -> impl fmt::Write {
    use driver::interface::DeviceDriver;
    let mut panic_uart =
        device_driver::PanicUart::new(memory::map::mmio::PL011_UART_START.into_usize());
    // Prefer the remapped virtual address if the UART driver already initialized.
    let maybe_uart_mmio_start_addr = super::PL011_UART.virt_mmio_start_addr();
    panic_uart
        .init(maybe_uart_mmio_start_addr)
        .unwrap_or_else(|_| cpu::qemu_exit_failure());
    panic_uart
}
/// Return a reference to the console.
///
/// The PL011 UART instance serves as the system console.
pub fn console() -> &'static impl console::interface::All {
    &super::PL011_UART
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
/// Minimal code needed to bring up the console in QEMU (for testing only). This is often fewer
/// steps than on real hardware due to QEMU's abstractions.
#[cfg(feature = "test_build")]
pub fn qemu_bring_up_console() {
    use driver::interface::DeviceDriver;
    // Calling the UART's init ensures that the BSP's instance of the UART does remap the MMIO
    // addresses.
    unsafe {
        // Without a working console there is no way to report test results, so bail out of QEMU
        // with a failure code on error.
        super::PL011_UART
            .init()
            .unwrap_or_else(|_| cpu::qemu_exit_failure());
    }
}

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Processor code.
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Used by `arch` code to find the early boot core.
#[no_mangle]
#[link_section = ".text._start_arguments"]
pub static BOOT_CORE_ID: u64 = 0;

@ -0,0 +1,61 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP driver support.
use crate::driver;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Device Driver Manager type.
struct BSPDriverManager {
    // All driver instances, in bring-up order: GPIO, PL011 UART, interrupt controller. The
    // slice-based accessors in the `DriverManager` impl rely on this ordering.
    device_drivers: [&'static (dyn DeviceDriver + Sync); 3],
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static BSP_DRIVER_MANAGER: BSPDriverManager = BSPDriverManager {
device_drivers: [
&super::GPIO,
&super::PL011_UART,
&super::INTERRUPT_CONTROLLER,
],
};
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the driver manager.
pub fn driver_manager() -> &'static impl driver::interface::DriverManager {
&BSP_DRIVER_MANAGER
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use driver::interface::DeviceDriver;
impl driver::interface::DriverManager for BSPDriverManager {
    /// Every registered driver.
    fn all_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)] {
        &self.device_drivers
    }

    /// The drivers needed to get `println!` working: GPIO and PL011 UART.
    fn early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)] {
        &self.device_drivers[..2]
    }

    /// Everything else, i.e. the interrupt controller.
    fn non_early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)] {
        &self.device_drivers[2..]
    }

    fn post_early_print_device_driver_init(&self) {
        // Configure PL011Uart's output pins.
        super::GPIO.map_pl011_uart();
    }
}

@ -0,0 +1,7 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP synchronous and asynchronous exception handling.
pub mod asynchronous;

@ -0,0 +1,36 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP asynchronous exception handling.
use crate::{bsp, exception};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
#[cfg(feature = "bsp_rpi3")]
pub(in crate::bsp) mod irq_map {
use super::bsp::device_driver::{IRQNumber, PeripheralIRQ};
pub const PL011_UART: IRQNumber = IRQNumber::Peripheral(PeripheralIRQ::new(57));
}
#[cfg(feature = "bsp_rpi4")]
pub(in crate::bsp) mod irq_map {
use super::bsp::device_driver::IRQNumber;
pub const PL011_UART: IRQNumber = IRQNumber::new(153);
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the IRQ manager.
///
/// Depending on the enabled BSP feature, this is either the Broadcom interrupt controller
/// (`bsp_rpi3`) or the GICv2 (`bsp_rpi4`).
pub fn irq_manager() -> &'static impl exception::asynchronous::interface::IRQManager<
    IRQNumberType = bsp::device_driver::IRQNumber,
> {
    &super::super::INTERRUPT_CONTROLLER
}

@ -0,0 +1,87 @@
/* SPDX-License-Identifier: MIT OR Apache-2.0
*
* Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
*/
/* This file provides __kernel_virt_addr_space_size */
INCLUDE src/bsp/raspberrypi/kernel_virt_addr_space_size.ld;
/* The kernel's virtual address range will be:
*
* [END_ADDRESS_INCLUSIVE, START_ADDRESS]
* [u64::MAX , (u64::MAX - __kernel_virt_addr_space_size) + 1]
*
* Since the start address is needed to set the linker address below, calculate it now.
*/
__kernel_virt_start_addr = ((0xffffffffffffffff - __kernel_virt_addr_space_size) + 1);
/* The address at which the kernel binary will be loaded by the Raspberry's firmware */
__rpi_load_addr = 0x80000;
ENTRY(__rpi_load_addr)
PHDRS
{
segment_rx PT_LOAD FLAGS(5); /* 5 == RX */
segment_rw PT_LOAD FLAGS(6); /* 6 == RW */
}
SECTIONS
{
/* Add the load address as an offset. Makes virt-to-phys translation easier for the human eye */
. = __kernel_virt_start_addr + __rpi_load_addr;
/***********************************************************************************************
* Code + RO Data + Global Offset Table
***********************************************************************************************/
__rx_start = .;
.text : AT(__rpi_load_addr)
{
KEEP(*(.text._start))
*(.text._start_arguments) /* Constants (or statics in Rust speak) read by _start(). */
*(.text._start_rust) /* The Rust entry point */
*(.text*) /* Everything else */
} :segment_rx
.rodata : ALIGN(8) { *(.rodata*) } :segment_rx
.got : ALIGN(8) { *(.got) } :segment_rx
. = ALIGN(64K); /* Align to page boundary */
__rx_end_exclusive = .;
/***********************************************************************************************
* Data + BSS
***********************************************************************************************/
__rw_start = .;
.data : { *(.data*) } :segment_rw
/* Section is zeroed in u64 chunks, align start and end to 8 bytes */
.bss : ALIGN(8)
{
__bss_start = .;
*(.bss*);
. = ALIGN(8);
. += 8; /* Fill for the bss == 0 case, so that __bss_start <= __bss_end_inclusive holds */
__bss_end_inclusive = . - 8;
} :NONE
. = ALIGN(64K); /* Align to page boundary */
__rw_end_exclusive = .;
/***********************************************************************************************
* Guard Page between boot core stack and data
***********************************************************************************************/
__boot_core_stack_guard_page_start = .;
. += 64K;
__boot_core_stack_guard_page_end_exclusive = .;
/***********************************************************************************************
* Boot Core Stack
***********************************************************************************************/
__boot_core_stack_start = .; /* ^ */
/* | stack */
. += 512K; /* | growth */
/* | direction */
__boot_core_stack_end_exclusive = .; /* | */
}

@ -0,0 +1,208 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management.
//!
//! The physical memory layout after the kernel has been loaded by the Raspberry's firmware, which
//! copies the binary to 0x8_0000:
//!
//! +---------------------------------------------+
//! | |
//! | Unmapped |
//! | |
//! +---------------------------------------------+
//! | | rx_start @ 0x8_0000
//! | .text |
//! | .rodata |
//! | .got |
//! | | rx_end_inclusive
//! +---------------------------------------------+
//! | | rw_start == rx_end
//! | .data |
//! | .bss |
//! | | rw_end_inclusive
//! +---------------------------------------------+
//! | | rw_end
//! | Unmapped Boot-core Stack Guard Page |
//! | |
//! +---------------------------------------------+
//! | | boot_core_stack_start ^
//! | | | stack
//! | Boot-core Stack | | growth
//! | | | direction
//! | | boot_core_stack_end_inclusive |
//! +---------------------------------------------+
pub mod mmu;
use crate::memory::{Address, Physical, Virtual};
use core::{cell::UnsafeCell, ops::RangeInclusive};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
// Symbols from the linker script.
extern "Rust" {
static __rx_start: UnsafeCell<()>;
static __rx_end_exclusive: UnsafeCell<()>;
static __rw_start: UnsafeCell<()>;
static __bss_start: UnsafeCell<u64>;
static __bss_end_inclusive: UnsafeCell<u64>;
static __rw_end_exclusive: UnsafeCell<()>;
static __boot_core_stack_start: UnsafeCell<()>;
static __boot_core_stack_end_exclusive: UnsafeCell<()>;
static __boot_core_stack_guard_page_start: UnsafeCell<()>;
static __boot_core_stack_guard_page_end_exclusive: UnsafeCell<()>;
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The board's physical memory map.
#[rustfmt::skip]
pub(super) mod map {
use super::*;
/// Physical devices.
#[cfg(feature = "bsp_rpi3")]
pub mod mmio {
use super::*;
pub const PERIPHERAL_IC_START: Address<Physical> = Address::new(0x3F00_B200);
pub const PERIPHERAL_IC_SIZE: usize = 0x24;
pub const GPIO_START: Address<Physical> = Address::new(0x3F20_0000);
pub const GPIO_SIZE: usize = 0xA0;
pub const PL011_UART_START: Address<Physical> = Address::new(0x3F20_1000);
pub const PL011_UART_SIZE: usize = 0x48;
pub const LOCAL_IC_START: Address<Physical> = Address::new(0x4000_0000);
pub const LOCAL_IC_SIZE: usize = 0x100;
pub const END: Address<Physical> = Address::new(0x4001_0000);
}
/// Physical devices.
#[cfg(feature = "bsp_rpi4")]
pub mod mmio {
use super::*;
pub const GPIO_START: Address<Physical> = Address::new(0xFE20_0000);
pub const GPIO_SIZE: usize = 0xA0;
pub const PL011_UART_START: Address<Physical> = Address::new(0xFE20_1000);
pub const PL011_UART_SIZE: usize = 0x48;
pub const GICD_START: Address<Physical> = Address::new(0xFF84_1000);
pub const GICD_SIZE: usize = 0x824;
pub const GICC_START: Address<Physical> = Address::new(0xFF84_2000);
pub const GICC_SIZE: usize = 0x14;
pub const END: Address<Physical> = Address::new(0xFF85_0000);
}
pub const END: Address<Physical> = mmio::END;
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Start address of the Read+Execute (RX) range.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rx_start() -> Address<Virtual> {
Address::new(unsafe { __rx_start.get() as usize })
}
/// Size of the Read+Execute (RX) range.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rx_size() -> usize {
unsafe { (__rx_end_exclusive.get() as usize) - (__rx_start.get() as usize) }
}
/// Start address of the Read+Write (RW) range.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_rw_start() -> Address<Virtual> {
    Address::new(unsafe { __rw_start.get() as usize })
}
/// Size of the Read+Write (RW) range.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn rw_size() -> usize {
unsafe { (__rw_end_exclusive.get() as usize) - (__rw_start.get() as usize) }
}
/// Start address of the boot core's stack.
///
/// # Safety
///
/// - Value is provided by the linker script and must be trusted as-is.
#[inline(always)]
fn virt_boot_core_stack_start() -> Address<Virtual> {
    Address::new(unsafe { __boot_core_stack_start.get() as usize })
}
/// Size of the boot core's stack.
#[inline(always)]
fn boot_core_stack_size() -> usize {
unsafe {
(__boot_core_stack_end_exclusive.get() as usize) - (__boot_core_stack_start.get() as usize)
}
}
/// Start address of the boot core's stack guard page.
#[inline(always)]
fn virt_boot_core_stack_guard_page_start() -> Address<Virtual> {
Address::new(unsafe { __boot_core_stack_guard_page_start.get() as usize })
}
/// Size of the boot core's stack guard page.
#[inline(always)]
fn boot_core_stack_guard_page_size() -> usize {
unsafe {
(__boot_core_stack_guard_page_end_exclusive.get() as usize)
- (__boot_core_stack_guard_page_start.get() as usize)
}
}
/// Exclusive end address of the physical address space.
#[inline(always)]
fn phys_addr_space_end() -> Address<Physical> {
map::END
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return the inclusive range spanning the .bss section.
///
/// # Safety
///
/// - Values are provided by the linker script and must be trusted as-is.
/// - The linker-provided addresses must be u64 aligned.
pub fn bss_range_inclusive() -> RangeInclusive<*mut u64> {
    let range = unsafe { RangeInclusive::new(__bss_start.get(), __bss_end_inclusive.get()) };

    // The linker script pads .bss so it always spans at least one u64.
    assert!(!range.is_empty());

    range
}

@ -0,0 +1,187 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! BSP Memory Management Unit.
use crate::{
common,
memory::{
mmu as generic_mmu,
mmu::{
AccessPermissions, AddressSpace, AssociatedTranslationTable, AttributeFields,
MemAttributes, Page, PageSliceDescriptor, TranslationGranule,
},
Physical, Virtual,
},
synchronization::InitStateLock,
};
use core::convert::TryInto;
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
type KernelTranslationTable =
<KernelVirtAddrSpace as AssociatedTranslationTable>::TableStartFromTop;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// The translation granule chosen by this BSP. This will be used everywhere else in the kernel to
/// derive respective data structures and their sizes. For example, the `crate::memory::mmu::Page`.
pub type KernelGranule = TranslationGranule<{ 64 * 1024 }>;
/// The kernel's virtual address space defined by this BSP.
pub type KernelVirtAddrSpace = AddressSpace<{ get_virt_addr_space_size() }>;
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
/// The kernel translation tables.
///
/// It is mandatory that InitStateLock is transparent.
///
/// That is, `size_of(InitStateLock<KernelTranslationTable>) == size_of(KernelTranslationTable)`.
/// There is a unit test that checks this property.
#[link_section = ".data"]
static KERNEL_TABLES: InitStateLock<KernelTranslationTable> =
InitStateLock::new(KernelTranslationTable::new_for_precompute());
/// This value is needed during early boot for MMU setup.
///
/// This will be patched to the correct value by the "translation table tool" after linking. This
/// given value here is just a dummy.
#[link_section = ".text._start_arguments"]
#[no_mangle]
static PHYS_KERNEL_TABLES_BASE_ADDR: u64 = 0xCCCCAAAAFFFFEEEE;
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// This is a hack for retrieving the value for the kernel's virtual address space size as a
/// constant from a common place, since it is needed as a compile-time/link-time constant in both,
/// the linker script and the Rust sources.
const fn get_virt_addr_space_size() -> usize {
    // Deliberately left uninitialized: the `include!`d file (shared with the linker script, not
    // visible here) is expected to expand to an assignment of the form
    // `__kernel_virt_addr_space_size = <value>;`, which is valid in both syntaxes and
    // initializes this local.
    let __kernel_virt_addr_space_size;
    include!("../kernel_virt_addr_space_size.ld");
    __kernel_virt_addr_space_size
}
/// Helper function for calculating the number of pages the given parameter spans.
///
/// Panics for a zero or non-granule-aligned size.
const fn size_to_num_pages(size: usize) -> usize {
    assert!(size > 0);
    assert!(size % KernelGranule::SIZE == 0);
    // NOTE(review): the shift assumes `KernelGranule::SIZE == 1 << KernelGranule::SHIFT`,
    // presumably guaranteed by `TranslationGranule` — confirm there.
    size >> KernelGranule::SHIFT
}
/// The Read+Execute (RX) pages of the kernel binary.
fn virt_rx_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rx_size());
PageSliceDescriptor::from_addr(super::virt_rx_start(), num_pages)
}
/// The Read+Write (RW) pages of the kernel binary.
fn virt_rw_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::rw_size());
PageSliceDescriptor::from_addr(super::virt_rw_start(), num_pages)
}
/// The boot core's stack.
fn virt_boot_core_stack_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_size());
PageSliceDescriptor::from_addr(super::virt_boot_core_stack_start(), num_pages)
}
// There is no reason to expect the following conversions to fail, since they were generated offline
// by the `translation table tool`. If it doesn't work, a panic due to the unwrap is justified.
/// The Read+Execute (RX) pages of the kernel binary.
fn phys_rx_page_desc() -> PageSliceDescriptor<Physical> {
virt_rx_page_desc().try_into().unwrap()
}
/// The Read+Write (RW) pages of the kernel binary.
fn phys_rw_page_desc() -> PageSliceDescriptor<Physical> {
virt_rw_page_desc().try_into().unwrap()
}
/// The boot core's stack.
fn phys_boot_core_stack_page_desc() -> PageSliceDescriptor<Physical> {
virt_boot_core_stack_page_desc().try_into().unwrap()
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
/// Return a reference to the kernel's translation tables.
pub fn kernel_translation_tables() -> &'static InitStateLock<KernelTranslationTable> {
&KERNEL_TABLES
}
/// The boot core's stack guard page.
pub fn virt_boot_core_stack_guard_page_desc() -> PageSliceDescriptor<Virtual> {
let num_pages = size_to_num_pages(super::boot_core_stack_guard_page_size());
PageSliceDescriptor::from_addr(super::virt_boot_core_stack_guard_page_start(), num_pages)
}
/// Pointer to the last page of the physical address space.
pub fn phys_addr_space_end_page() -> *const Page<Physical> {
common::align_down(
super::phys_addr_space_end().into_usize(),
KernelGranule::SIZE,
) as *const Page<_>
}
/// Add mapping records for the kernel binary.
///
/// The actual translation table entries for the kernel binary are generated using the offline
/// `translation table tool` and patched into the kernel binary. This function just adds the mapping
/// record entries.
///
/// It must be ensured that these entries are in sync with the offline tool.
pub fn kernel_add_mapping_records_for_precomputed() {
generic_mmu::kernel_add_mapping_record(
"Kernel code and RO data",
&virt_rx_page_desc(),
&phys_rx_page_desc(),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
);
generic_mmu::kernel_add_mapping_record(
"Kernel data and bss",
&virt_rw_page_desc(),
&phys_rw_page_desc(),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
);
generic_mmu::kernel_add_mapping_record(
"Kernel boot-core stack",
&virt_boot_core_stack_page_desc(),
&phys_boot_core_stack_page_desc(),
&AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
);
}

@ -0,0 +1,21 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! General purpose code.
/// Check if a value is aligned to a given size.
///
/// Panics if `alignment` is not a power of two.
#[inline(always)]
pub const fn is_aligned(value: usize, alignment: usize) -> bool {
    assert!(alignment.is_power_of_two());

    // For a power-of-two alignment, alignment is equivalent to a zero remainder.
    value % alignment == 0
}
/// Align down.
///
/// Returns the greatest multiple of `alignment` that is `<= value`. Panics if `alignment` is not
/// a power of two.
#[inline(always)]
pub const fn align_down(value: usize, alignment: usize) -> usize {
    assert!(alignment.is_power_of_two());

    // Truncating division followed by multiplication clears exactly the low
    // `log2(alignment)` bits.
    (value / alignment) * alignment
}

@ -0,0 +1,53 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! System console.
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Console interfaces.
pub mod interface {
    use core::fmt;

    /// Console write functions.
    pub trait Write {
        /// Write a single character.
        fn write_char(&self, c: char);

        /// Write a Rust format string.
        fn write_fmt(&self, args: fmt::Arguments) -> fmt::Result;

        /// Block until the last buffered character has been physically put on the TX wire.
        fn flush(&self);
    }

    /// Console read functions.
    pub trait Read {
        /// Read a single character.
        ///
        /// The default implementation returns a space character, so write-only consoles
        /// still satisfy the trait.
        fn read_char(&self) -> char {
            ' '
        }

        /// Clear RX buffers, if any.
        fn clear_rx(&self);
    }

    /// Console statistics.
    pub trait Statistics {
        /// Return the number of characters written.
        ///
        /// Defaults to `0` for implementors that keep no statistics.
        fn chars_written(&self) -> usize {
            0
        }

        /// Return the number of characters read.
        ///
        /// Defaults to `0` for implementors that keep no statistics.
        fn chars_read(&self) -> usize {
            0
        }
    }

    /// Trait alias for a full-fledged console (requires the `trait_alias` feature).
    pub trait All = Write + Read + Statistics;
}

@ -0,0 +1,21 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Processor code.
// Select the architecture-specific implementation via the `path` attribute.
#[cfg(target_arch = "aarch64")]
#[path = "_arch/aarch64/cpu.rs"]
mod arch_cpu;

mod boot;
pub mod smp;

//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_cpu::{nop, wait_forever};

// QEMU exit hooks are only compiled for test builds.
#[cfg(feature = "test_build")]
pub use arch_cpu::{qemu_exit_failure, qemu_exit_success};

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Boot code.

// Pulls in the arch-specific boot code; the kernel's `_start()` entry point lives there.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/cpu/boot.rs"]
mod arch_boot;

@ -0,0 +1,14 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Symmetric multiprocessing.

// Select the architecture-specific implementation via the `path` attribute.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/cpu/smp.rs"]
mod arch_smp;

//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_smp::core_id;

@ -0,0 +1,62 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Driver support.
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Driver interfaces.
pub mod interface {
    /// Device Driver functions.
    pub trait DeviceDriver {
        /// Return a compatibility string for identifying the driver.
        fn compatible(&self) -> &'static str;

        /// Called by the kernel to bring up the device.
        ///
        /// The default implementation is a no-op that reports success.
        ///
        /// # Safety
        ///
        /// - During init, drivers might do stuff with system-wide impact.
        unsafe fn init(&self) -> Result<(), &'static str> {
            Ok(())
        }

        /// Called by the kernel to register and enable the device's IRQ handlers, if any.
        ///
        /// Rust's type system will prevent a call to this function unless the calling instance
        /// itself has static lifetime. The default implementation is a no-op that reports
        /// success.
        fn register_and_enable_irq_handler(&'static self) -> Result<(), &'static str> {
            Ok(())
        }

        /// After MMIO remapping, returns the new virtual start address.
        ///
        /// This API assumes a driver has only a single, contiguous MMIO aperture, which will not be
        /// the case for more complex devices. This API will likely change in future tutorials.
        ///
        /// Defaults to `None` for drivers without a remapped MMIO aperture.
        fn virt_mmio_start_addr(&self) -> Option<usize> {
            None
        }
    }

    /// Device driver management functions.
    ///
    /// The `BSP` is supposed to supply one global instance.
    pub trait DriverManager {
        /// Return a slice of references to all `BSP`-instantiated drivers.
        fn all_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)];

        /// Return only those drivers needed for the BSP's early printing functionality.
        ///
        /// For example, the default UART.
        fn early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)];

        /// Return all drivers minus early-print drivers.
        fn non_early_print_device_drivers(&self) -> &[&'static (dyn DeviceDriver + Sync)];

        /// Initialization code that runs after the early print driver init.
        fn post_early_print_device_driver_init(&self);
    }
}

@ -0,0 +1,48 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Synchronous and asynchronous exception handling.
#[cfg(target_arch = "aarch64")]
#[path = "_arch/aarch64/exception.rs"]
mod arch_exception;
pub mod asynchronous;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_exception::{current_privilege_level, handling_init};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Kernel privilege levels.
///
/// `Eq` is derived alongside `PartialEq` because the equivalence relation on this fieldless
/// enum is total (clippy: `derive_partial_eq_without_eq`).
#[allow(missing_docs)]
#[derive(Eq, PartialEq)]
pub enum PrivilegeLevel {
    User,
    Kernel,
    Hypervisor,
    Unknown,
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use test_macros::kernel_test;

    /// Libkernel unit tests must execute in kernel mode.
    ///
    /// Uses `assert!` with `==` rather than `assert_eq!` because `PrivilegeLevel` does not
    /// derive `Debug`.
    #[kernel_test]
    fn test_runner_executes_in_kernel_mode() {
        let (level, _) = current_privilege_level();

        assert!(level == PrivilegeLevel::Kernel)
    }
}

@ -0,0 +1,152 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Asynchronous exception handling.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/exception/asynchronous.rs"]
mod arch_asynchronous;
use core::{fmt, marker::PhantomData};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_asynchronous::{
is_local_irq_masked, local_irq_mask, local_irq_mask_save, local_irq_restore, local_irq_unmask,
print_state,
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Interrupt descriptor.
#[derive(Copy, Clone)]
pub struct IRQDescriptor {
    /// Descriptive name.
    pub name: &'static str,

    /// Reference to handler trait object.
    pub handler: &'static (dyn interface::IRQHandler + Sync),
}

/// IRQContext token.
///
/// An instance of this type indicates that the local core is currently executing in IRQ
/// context, aka executing an interrupt vector or subcalls of it.
///
/// Concept and implementation derived from the `CriticalSection` introduced in
/// <https://github.com/rust-embedded/bare-metal>
#[derive(Clone, Copy)]
pub struct IRQContext<'irq_context> {
    // Zero-sized marker that ties the token to the `'irq_context` lifetime; see
    // `IRQContext::new()` for the safety contract around that lifetime.
    _0: PhantomData<&'irq_context ()>,
}
/// Asynchronous exception handling interfaces.
pub mod interface {
    /// Implemented by types that handle IRQs.
    pub trait IRQHandler {
        /// Called when the corresponding interrupt is asserted.
        fn handle(&self) -> Result<(), &'static str>;
    }

    /// IRQ management functions.
    ///
    /// The `BSP` is supposed to supply one global instance. Typically implemented by the
    /// platform's interrupt controller.
    pub trait IRQManager {
        /// The IRQ number type depends on the implementation.
        type IRQNumberType;

        /// Register a handler.
        fn register_handler(
            &self,
            irq_number: Self::IRQNumberType,
            descriptor: super::IRQDescriptor,
        ) -> Result<(), &'static str>;

        /// Enable an interrupt in the controller.
        fn enable(&self, irq_number: Self::IRQNumberType);

        /// Handle pending interrupts.
        ///
        /// This function is called directly from the CPU's IRQ exception vector. On AArch64,
        /// this means that the respective CPU core has disabled exception handling.
        /// This function can therefore not be preempted and runs start to finish.
        ///
        /// Takes an IRQContext token to ensure it can only be called from IRQ context.
        #[allow(clippy::trivially_copy_pass_by_ref)]
        fn handle_pending_irqs<'irq_context>(
            &'irq_context self,
            ic: &super::IRQContext<'irq_context>,
        );

        /// Print list of registered handlers.
        fn print_handler(&self);
    }
}
/// A wrapper type for IRQ numbers with integrated range sanity check.
///
/// `MAX_INCLUSIVE` is the largest IRQ number accepted; the check happens in `new()`.
#[derive(Copy, Clone)]
pub struct IRQNumber<const MAX_INCLUSIVE: usize>(usize);
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl<'irq_context> IRQContext<'irq_context> {
    /// Creates an IRQContext token.
    ///
    /// # Safety
    ///
    /// - Must only be called while the current core is executing in interrupt context, and
    ///   the token must not live beyond the end of that context. Creation is therefore only
    ///   allowed inside interrupt vector functions, e.g. `extern "C" fn current_elx_irq()`
    ///   in the ARMv8-A case.
    /// - The lifetime `'irq_context` of the returned instance is unconstrained. User code
    ///   must not be able to influence the lifetime picked for this type, since that might
    ///   cause it to be inferred to `'static`.
    #[inline(always)]
    pub unsafe fn new() -> Self {
        Self { _0: PhantomData }
    }
}
impl<const MAX_INCLUSIVE: usize> IRQNumber<{ MAX_INCLUSIVE }> {
/// Creates a new instance if number <= MAX_INCLUSIVE.
pub const fn new(number: usize) -> Self {
assert!(number <= MAX_INCLUSIVE);
Self { 0: number }
}
/// Return the wrapped number.
pub const fn get(self) -> usize {
self.0
}
}
impl<const MAX_INCLUSIVE: usize> fmt::Display for IRQNumber<{ MAX_INCLUSIVE }> {
    /// Print the wrapped IRQ number in plain decimal.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
/// Run `f` with IRQs masked on the executing core.
///
/// The core's IRQ mask state is saved beforehand and restored afterwards, so the externally
/// visible HW state is unchanged on return; this is why the function is deemed safe.
#[inline(always)]
pub fn exec_with_irq_masked<T>(f: impl FnOnce() -> T) -> T {
    let saved = unsafe { local_irq_mask_save() };
    let ret = f();
    unsafe { local_irq_restore(saved) };

    ret
}

@ -0,0 +1,176 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
// Rust embedded logo for `make doc`.
#![doc(html_logo_url = "https://git.io/JeGIp")]
//! The `kernel` library.
//!
//! Used to compose the final kernel binary.
//!
//! # Code organization and architecture
//!
//! The code is divided into different *modules*, each representing a typical **subsystem** of the
//! `kernel`. Top-level module files of subsystems reside directly in the `src` folder. For example,
//! `src/memory.rs` contains code that is concerned with all things memory management.
//!
//! ## Visibility of processor architecture code
//!
//! Some of the `kernel`'s subsystems depend on low-level code that is specific to the target
//! processor architecture. For each supported processor architecture, there exists a subfolder in
//! `src/_arch`, for example, `src/_arch/aarch64`.
//!
//! The architecture folders mirror the subsystem modules laid out in `src`. For example,
//! architectural code that belongs to the `kernel`'s MMU subsystem (`src/memory/mmu.rs`) would go
//! into `src/_arch/aarch64/memory/mmu.rs`. The latter file is loaded as a module in
//! `src/memory/mmu.rs` using the `path attribute`. Usually, the chosen module name is the generic
//! module's name prefixed with `arch_`.
//!
//! For example, this is the top of `src/memory/mmu.rs`:
//!
//! ```
//! #[cfg(target_arch = "aarch64")]
//! #[path = "../_arch/aarch64/memory/mmu.rs"]
//! mod arch_mmu;
//! ```
//!
//! Oftentimes, items from the `arch_` module will be publicly reexported by the parent module.
//! This way, each architecture specific module can provide its implementation of an item, while the
//! caller must not be concerned which architecture has been conditionally compiled.
//!
//! ## BSP code
//!
//! `BSP` stands for Board Support Package. `BSP` code is organized under `src/bsp.rs` and contains
//! target board specific definitions and functions. These are things such as the board's memory map
//! or instances of drivers for devices that are featured on the respective board.
//!
//! Just like processor architecture code, the `BSP` code's module structure tries to mirror the
//! `kernel`'s subsystem modules, but there is no reexporting this time. That means whatever is
//! provided must be called starting from the `bsp` namespace, e.g. `bsp::driver::driver_manager()`.
//!
//! ## Kernel interfaces
//!
//! Both `arch` and `bsp` contain code that is conditionally compiled depending on the actual target
//! and board for which the kernel is compiled. For example, the `interrupt controller` hardware of
//! the `Raspberry Pi 3` and the `Raspberry Pi 4` is different, but we want the rest of the `kernel`
//! code to play nicely with any of the two without much hassle.
//!
//! In order to provide a clean abstraction between `arch`, `bsp` and `generic kernel code`,
//! `interface` traits are provided *whenever possible* and *where it makes sense*. They are defined
//! in the respective subsystem module and help to enforce the idiom of *program to an interface,
//! not an implementation*. For example, there will be a common IRQ handling interface which the two
//! different interrupt controller `drivers` of both Raspberrys will implement, and only export the
//! interface to the rest of the `kernel`.
//!
//! ```
//! +-------------------+
//! | Interface (Trait) |
//! | |
//! +--+-------------+--+
//! ^ ^
//! | |
//! | |
//! +----------+--+ +--+----------+
//! | kernel code | | bsp code |
//! | | | arch code |
//! +-------------+ +-------------+
//! ```
//!
//! # Summary
//!
//! For a logical `kernel` subsystem, corresponding code can be distributed over several physical
//! locations. Here is an example for the **memory** subsystem:
//!
//! - `src/memory.rs` and `src/memory/**/*`
//! - Common code that is agnostic of target processor architecture and `BSP` characteristics.
//! - Example: A function to zero a chunk of memory.
//! - Interfaces for the memory subsystem that are implemented by `arch` or `BSP` code.
//! - Example: An `MMU` interface that defines `MMU` function prototypes.
//! - `src/bsp/__board_name__/memory.rs` and `src/bsp/__board_name__/memory/**/*`
//! - `BSP` specific code.
//! - Example: The board's memory map (physical addresses of DRAM and MMIO devices).
//! - `src/_arch/__arch_name__/memory.rs` and `src/_arch/__arch_name__/memory/**/*`
//! - Processor architecture specific code.
//! - Example: Implementation of the `MMU` interface for the `__arch_name__` processor
//! architecture.
//!
//! From a namespace perspective, **memory** subsystem code lives in:
//!
//! - `crate::memory::*`
//! - `crate::bsp::memory::*`
//!
//! # Boot flow
//!
//! 1. The kernel's entry point is the function `cpu::boot::arch_boot::_start()`.
//! - It is implemented in `src/_arch/__arch_name__/cpu/boot.s`.
//! 2. Once finished with architectural setup, the arch code calls [`runtime_init::runtime_init()`].
//!
//! [`runtime_init::runtime_init()`]: runtime_init/fn.runtime_init.html
// Fix: the lint path was doubled (`clippy::clippy::upper_case_acronyms`), which does not
// name the intended lint. The correct path is `clippy::upper_case_acronyms`.
#![allow(clippy::upper_case_acronyms)]
#![allow(incomplete_features)]
#![feature(asm)]
#![feature(const_evaluatable_checked)]
#![feature(const_fn)]
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_generics)]
#![feature(const_panic)]
#![feature(core_intrinsics)]
#![feature(format_args_nl)]
#![feature(global_asm)]
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(trait_alias)]
#![no_std]
// Testing
#![cfg_attr(test, no_main)]
#![feature(custom_test_frameworks)]
#![reexport_test_harness_main = "test_main"]
#![test_runner(crate::test_runner)]
mod panic_wait;
mod runtime_init;
mod synchronization;
pub mod bsp;
pub mod common;
pub mod console;
pub mod cpu;
pub mod driver;
pub mod exception;
pub mod memory;
pub mod print;
pub mod state;
pub mod time;
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
/// The default runner for unit tests.
///
/// Prints a header, then runs each test in order. A failing test calls `panic!()`, so the
/// `[ok]` line is only ever printed for tests that passed.
pub fn test_runner(tests: &[&test_types::UnitTest]) {
    println!("Running {} tests", tests.len());
    println!("-------------------------------------------------------------------\n");

    let mut number = 1;
    for test in tests {
        print!("{:>3}. {:.<58}", number, test.name);

        // Run the actual test.
        (test.test_func)();

        // Execution reaches here only if the test has passed.
        println!("[ok]");

        number += 1;
    }
}
/// The `kernel_init()` for unit tests. Called from `runtime_init()`.
///
/// Brings up exception handling and the QEMU console, runs the generated test harness entry
/// point (`test_main`), then signals success to QEMU.
#[cfg(test)]
#[no_mangle]
unsafe fn kernel_init() -> ! {
    exception::handling_init();
    bsp::console::qemu_bring_up_console();

    test_main();

    cpu::qemu_exit_success()
}

@ -0,0 +1,105 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
// Rust embedded logo for `make doc`.
#![doc(html_logo_url = "https://git.io/JeGIp")]
//! The `kernel` binary.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
use libkernel::{bsp, cpu, driver, exception, info, memory, state, time, warn};
/// Early init code.
///
/// # Safety
///
/// - Only a single core must be active and running this function.
/// - Printing will not work until the respective driver's MMIO is remapped.
#[no_mangle]
unsafe fn kernel_init() -> ! {
    use driver::interface::DriverManager;

    exception::handling_init();

    // Add the mapping records for the precomputed entries first, so that they appear on the top of
    // the list.
    bsp::memory::mmu::kernel_add_mapping_records_for_precomputed();

    // Bring up the drivers needed for printing first.
    for dev in bsp::driver::driver_manager()
        .early_print_device_drivers()
        .iter()
    {
        // Errors cannot be printed yet, obviously, so just safely park the CPU on failure.
        dev.init().unwrap_or_else(|_| cpu::wait_forever());
    }
    bsp::driver::driver_manager().post_early_print_device_driver_init();
    // Printing available from here on.

    // Now bring up the remaining drivers.
    for dev in bsp::driver::driver_manager()
        .non_early_print_device_drivers()
        .iter()
    {
        if let Err(e) = dev.init() {
            panic!("Error loading driver: {}: {}", dev.compatible(), e);
        }
    }

    // Let device drivers register and enable their handlers with the interrupt controller.
    for dev in bsp::driver::driver_manager().all_device_drivers() {
        if let Err(msg) = dev.register_and_enable_irq_handler() {
            warn!("Error registering IRQ handler: {}", msg);
        }
    }

    // Unmask interrupts on the boot CPU core.
    exception::asynchronous::local_irq_unmask();

    // Announce conclusion of the kernel_init() phase.
    state::state_manager().transition_to_single_core_main();

    // Transition from unsafe to safe.
    kernel_main()
}
/// The main function running after the early init.
///
/// Prints a boot report (board, mappings, privilege level, exception state, timer
/// resolution, drivers, IRQ handlers) and then parks the core.
fn kernel_main() -> ! {
    use driver::interface::DriverManager;
    use exception::asynchronous::interface::IRQManager;

    info!("Booting on: {}", bsp::board_name());

    info!("MMU online:");
    memory::mmu::kernel_print_mappings();

    let (_, privilege_level) = exception::current_privilege_level();
    info!("Current privilege level: {}", privilege_level);

    info!("Exception handling state:");
    exception::asynchronous::print_state();

    info!(
        "Architectural timer resolution: {} ns",
        time::time_manager().resolution().as_nanos()
    );

    info!("Drivers loaded:");
    for (n, dev) in bsp::driver::driver_manager()
        .all_device_drivers()
        .iter()
        .enumerate()
    {
        info!("      {}. {}", n + 1, dev.compatible());
    }

    info!("Registered IRQ handlers:");
    bsp::exception::asynchronous::irq_manager().print_handler();

    info!("Echoing input now");
    cpu::wait_forever();
}

@ -0,0 +1,202 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management.
pub mod mmu;
use crate::common;
use core::{
convert::TryFrom,
fmt,
marker::PhantomData,
ops::{AddAssign, RangeInclusive, SubAssign},
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Metadata trait for marking the type of an address.
pub trait AddressType: Copy + Clone + PartialOrd + PartialEq {}

/// Zero-sized type to mark a physical address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub enum Physical {}

/// Zero-sized type to mark a virtual address.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub enum Virtual {}

/// Generic address type.
///
/// `ATYPE` (either [`Physical`] or [`Virtual`]) distinguishes the two address kinds at the
/// type level; the `PhantomData<fn() -> ATYPE>` carries it without storing a value.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub struct Address<ATYPE: AddressType> {
    value: usize,
    _address_type: PhantomData<fn() -> ATYPE>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
// Marker-trait impls: `Physical` and `Virtual` are uninhabited tag types, never instantiated.
impl AddressType for Physical {}
impl AddressType for Virtual {}
impl<ATYPE: AddressType> Address<ATYPE> {
    /// Create an instance from a raw usize.
    pub const fn new(value: usize) -> Self {
        Self {
            value,
            _address_type: PhantomData,
        }
    }

    /// Round this address down to the given power-of-two alignment.
    pub const fn align_down(self, alignment: usize) -> Self {
        Self::new(common::align_down(self.value, alignment))
    }

    /// Converts `Address` into an usize.
    pub const fn into_usize(self) -> usize {
        self.value
    }
}
impl TryFrom<Address<Virtual>> for Address<Physical> {
    type Error = mmu::TranslationError;

    /// Translate via the MMU; succeeds only if a valid mapping exists for `virt`.
    fn try_from(virt: Address<Virtual>) -> Result<Self, Self::Error> {
        mmu::try_virt_to_phys(virt)
    }
}
impl<ATYPE: AddressType> core::ops::Add<usize> for Address<ATYPE> {
    type Output = Self;

    /// Offset the address by `other` bytes. Uses plain `+`, so overflow follows the build's
    /// default integer-overflow behavior.
    fn add(self, other: usize) -> Self {
        Self::new(self.value + other)
    }
}
impl<ATYPE: AddressType> AddAssign for Address<ATYPE> {
    /// Add another address of the same kind in place.
    fn add_assign(&mut self, other: Self) {
        self.value += other.into_usize();
    }
}
impl<ATYPE: AddressType> core::ops::Sub<usize> for Address<ATYPE> {
    type Output = Self;

    /// Offset the address downwards by `other` bytes. Uses plain `-`, so underflow follows
    /// the build's default integer-overflow behavior.
    fn sub(self, other: usize) -> Self {
        Self::new(self.value - other)
    }
}
impl<ATYPE: AddressType> SubAssign for Address<ATYPE> {
    /// Subtract another address of the same kind in place.
    fn sub_assign(&mut self, other: Self) {
        self.value -= other.into_usize();
    }
}
impl fmt::Display for Address<Physical> {
    // Don't expect to see physical addresses greater than 40 bit.
    // Printed as `0xQQ_QQQQ_QQQQ`: one 8-bit group, then two 16-bit groups.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let raw = self.value;

        write!(
            f,
            "0x{:02x}_{:04x}_{:04x}",
            (raw >> 32) & 0xff,
            (raw >> 16) & 0xffff,
            raw & 0xffff
        )
    }
}
impl fmt::Display for Address<Virtual> {
    // Printed as `0xQQQQ_QQQQ_QQQQ_QQQQ`: four 16-bit groups covering the full 64-bit VA.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let raw = self.value;

        write!(
            f,
            "0x{:04x}_{:04x}_{:04x}_{:04x}",
            (raw >> 48) & 0xffff,
            (raw >> 32) & 0xffff,
            (raw >> 16) & 0xffff,
            raw & 0xffff
        )
    }
}
/// Zero out an inclusive memory range.
///
/// Writes are done with `write_volatile` so the compiler can neither elide nor reorder them.
///
/// # Safety
///
/// - `range.start` and `range.end` must be valid.
/// - `range.start` and `range.end` must be `T` aligned.
pub unsafe fn zero_volatile<T>(range: RangeInclusive<*mut T>)
where
    T: From<u8>,
{
    let mut ptr = *range.start();
    let end_inclusive = *range.end();

    while ptr <= end_inclusive {
        core::ptr::write_volatile(ptr, T::from(0));
        // `add(1)` is the idiomatic, usize-counted successor of `offset(1)`.
        ptr = ptr.add(1);
    }
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use test_macros::kernel_test;

    /// Check `zero_volatile()`.
    #[kernel_test]
    fn zero_volatile_works() {
        let mut x: [usize; 3] = [10, 11, 12];
        let x_range = x.as_mut_ptr_range();
        // `as_mut_ptr_range()` is exclusive at the end; step back one element to build the
        // inclusive range `zero_volatile()` expects.
        let x_range_inclusive =
            RangeInclusive::new(x_range.start, unsafe { x_range.end.offset(-1) });

        unsafe { zero_volatile(x_range_inclusive) };

        assert_eq!(x, [0, 0, 0]);
    }

    /// Check `bss` section layout.
    #[kernel_test]
    fn bss_section_is_sane() {
        use crate::bsp::memory::bss_range_inclusive;
        use core::mem;

        let start = *bss_range_inclusive().start() as usize;
        let end = *bss_range_inclusive().end() as usize;

        // `bss` boundaries must be word-aligned and non-inverted.
        assert_eq!(start % mem::size_of::<usize>(), 0);
        assert_eq!(end % mem::size_of::<usize>(), 0);
        assert!(end >= start);
    }
}

@ -0,0 +1,270 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit.
#[cfg(target_arch = "aarch64")]
#[path = "../_arch/aarch64/memory/mmu.rs"]
mod arch_mmu;
mod mapping_record;
mod translation_table;
mod types;
use crate::{
bsp,
memory::{Address, Physical, Virtual},
synchronization, warn,
};
use core::fmt;
pub use types::*;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// MMU enable errors variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum MMUEnableError {
    /// The MMU was already turned on; enabling twice is refused.
    AlreadyEnabled,
    /// Any other failure, described by the wrapped message.
    Other(&'static str),
}

/// Translation error variants.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum TranslationError {
    /// Translation was attempted while the MMU is disabled.
    MMUDisabled,
    /// The translation did not complete; see the architectural implementation for specifics.
    Aborted,
}
/// Memory Management interfaces.
pub mod interface {
    use super::*;

    /// MMU functions.
    pub trait MMU {
        /// Turns on the MMU for the first time and enables data and instruction caching.
        ///
        /// # Safety
        ///
        /// - Changes the HW's global state.
        unsafe fn enable_mmu_and_caching(
            &self,
            phys_tables_base_addr: Address<Physical>,
        ) -> Result<(), MMUEnableError>;

        /// Returns true if the MMU is enabled, false otherwise.
        fn is_enabled(&self) -> bool;

        /// Try to translate a virtual address to a physical address.
        ///
        /// Will only succeed if there exists a valid mapping for the input VA.
        fn try_virt_to_phys(
            &self,
            virt: Address<Virtual>,
        ) -> Result<Address<Physical>, TranslationError>;
    }
}
/// Describes the characteristics of a translation granule.
///
/// Zero-sized; all information is carried in the `GRANULE_SIZE` const parameter and the
/// associated constants derived from it.
pub struct TranslationGranule<const GRANULE_SIZE: usize>;

/// Describes properties of an address space.
///
/// Zero-sized; all information is carried in the `AS_SIZE` const parameter.
pub struct AddressSpace<const AS_SIZE: usize>;

/// Intended to be implemented for [`AddressSpace`].
pub trait AssociatedTranslationTable {
    /// A translation table whose address range is:
    ///
    /// [u64::MAX, (u64::MAX - AS_SIZE) + 1]
    type TableStartFromTop;

    /// A translation table whose address range is:
    ///
    /// [0, AS_SIZE - 1]
    type TableStartFromBottom;
}
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
use interface::MMU;
use synchronization::interface::ReadWriteEx;
use translation_table::interface::TranslationTable;
/// Map pages in the kernel's translation tables.
///
/// No input checks done, input is passed through to the architectural implementation.
///
/// # Safety
///
/// - See `map_pages_at()`.
/// - Does not prevent aliasing.
unsafe fn kernel_map_pages_at_unchecked(
    name: &'static str,
    virt_pages: &PageSliceDescriptor<Virtual>,
    phys_pages: &PageSliceDescriptor<Physical>,
    attr: &AttributeFields,
) -> Result<(), &'static str> {
    // Perform the architectural mapping first; bail out before recording anything on failure.
    let map_result = bsp::memory::mmu::kernel_translation_tables()
        .write(|tables| tables.map_pages_at(virt_pages, phys_pages, attr));
    map_result?;

    kernel_add_mapping_record(name, virt_pages, phys_pages, attr);

    Ok(())
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
impl fmt::Display for MMUEnableError {
    /// Human-readable rendering of the enable error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::AlreadyEnabled => write!(f, "MMU is already enabled"),
            Self::Other(msg) => write!(f, "{}", msg),
        }
    }
}
impl<const GRANULE_SIZE: usize> TranslationGranule<GRANULE_SIZE> {
    /// The granule's size.
    pub const SIZE: usize = Self::size_checked();

    /// The granule's mask.
    pub const MASK: usize = Self::SIZE - 1;

    /// The granule's shift, aka log2(size).
    pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize;

    /// Enforce at constant-evaluation time that the granule size is a power of two.
    const fn size_checked() -> usize {
        assert!(GRANULE_SIZE.is_power_of_two());

        GRANULE_SIZE
    }
}
impl<const AS_SIZE: usize> AddressSpace<AS_SIZE> {
    /// The address space size.
    pub const SIZE: usize = Self::size_checked();

    /// The address space shift, aka log2(size).
    pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize;

    /// Enforce at constant-evaluation time that the size is a power of two and that the
    /// architecture-specific constraints hold.
    const fn size_checked() -> usize {
        assert!(AS_SIZE.is_power_of_two());

        // Check for architectural restrictions as well.
        Self::arch_address_space_size_sanity_checks();

        AS_SIZE
    }
}
/// Add an entry to the mapping info record.
///
/// Record keeping is best-effort: when the record storage rejects the entry, a warning is
/// printed instead of propagating the error.
pub fn kernel_add_mapping_record(
    name: &'static str,
    virt_pages: &PageSliceDescriptor<Virtual>,
    phys_pages: &PageSliceDescriptor<Physical>,
    attr: &AttributeFields,
) {
    match mapping_record::kernel_add(name, virt_pages, phys_pages, attr) {
        Ok(_) => (),
        Err(msg) => warn!("{}", msg),
    }
}
/// Raw mapping of virtual to physical pages in the kernel translation tables.
///
/// Prevents mapping into the MMIO range of the tables.
///
/// # Safety
///
/// - See `kernel_map_pages_at_unchecked()`.
/// - Does not prevent aliasing. Currently, the callers must be trusted.
pub unsafe fn kernel_map_pages_at(
    name: &'static str,
    virt_pages: &PageSliceDescriptor<Virtual>,
    phys_pages: &PageSliceDescriptor<Physical>,
    attr: &AttributeFields,
) -> Result<(), &'static str> {
    let is_mmio = bsp::memory::mmu::kernel_translation_tables()
        .read(|tables| tables.is_virt_page_slice_mmio(virt_pages));
    if is_mmio {
        return Err("Attempt to manually map into MMIO region");
    }

    // Return the inner call's Result directly instead of the redundant `?` + `Ok(())`.
    kernel_map_pages_at_unchecked(name, virt_pages, phys_pages, attr)
}
/// MMIO remapping in the kernel translation tables.
///
/// Typically used by device drivers.
///
/// # Safety
///
/// - Same as `kernel_map_pages_at_unchecked()`, minus the aliasing part.
pub unsafe fn kernel_map_mmio(
    name: &'static str,
    mmio_descriptor: &MMIODescriptor,
) -> Result<Address<Virtual>, &'static str> {
    let phys_pages: PageSliceDescriptor<Physical> = mmio_descriptor.clone().into();

    // The MMIO aperture need not start page-aligned; remember the start address' offset into
    // its page so it can be re-applied to the mapped virtual address before returning.
    let offset_into_start_page =
        mmio_descriptor.start_addr().into_usize() & bsp::memory::mmu::KernelGranule::MASK;

    // Check if an identical page slice has been mapped for another driver. If so, reuse it.
    let virt_addr = if let Some(addr) =
        mapping_record::kernel_find_and_insert_mmio_duplicate(mmio_descriptor, name)
    {
        addr
    // Otherwise, allocate a new virtual page slice and map it.
    } else {
        let virt_pages: PageSliceDescriptor<Virtual> =
            bsp::memory::mmu::kernel_translation_tables()
                .write(|tables| tables.next_mmio_virt_page_slice(phys_pages.num_pages()))?;

        kernel_map_pages_at_unchecked(
            name,
            &virt_pages,
            &phys_pages,
            // MMIO is always mapped as non-cacheable device memory, read-write, never
            // executable.
            &AttributeFields {
                mem_attributes: MemAttributes::Device,
                acc_perms: AccessPermissions::ReadWrite,
                execute_never: true,
            },
        )?;

        virt_pages.start_addr()
    };

    Ok(virt_addr + offset_into_start_page)
}
/// Try to translate a virtual address to a physical address.
///
/// Will only succeed if there exists a valid mapping for the input VA. Delegates to the
/// architectural MMU implementation.
pub fn try_virt_to_phys(virt: Address<Virtual>) -> Result<Address<Physical>, TranslationError> {
    arch_mmu::mmu().try_virt_to_phys(virt)
}
/// Enable the MMU and data + instruction caching.
///
/// # Safety
///
/// - Crucial function during kernel init. Changes the complete memory view of the processor.
#[inline(always)]
pub unsafe fn enable_mmu_and_caching(
    phys_tables_base_addr: Address<Physical>,
) -> Result<(), MMUEnableError> {
    arch_mmu::mmu().enable_mmu_and_caching(phys_tables_base_addr)
}
/// Human-readable print of all recorded kernel mappings.
pub fn kernel_print_mappings() {
    mapping_record::kernel_print()
}

@ -0,0 +1,221 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! A record of mapped pages.
use super::{
AccessPermissions, Address, AttributeFields, MMIODescriptor, MemAttributes,
PageSliceDescriptor, Physical, Virtual,
};
use crate::{info, synchronization, synchronization::InitStateLock, warn};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Type describing a virtual memory mapping.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
struct MappingRecordEntry {
    // Names of the entities sharing this mapping. Slot 0 is the original creator; up to four
    // additional users can be registered later (see `add_user`).
    pub users: [Option<&'static str>; 5],
    pub phys_pages: PageSliceDescriptor<Physical>,
    // Start address of the virtual page slice this entry maps.
    pub virt_start_addr: Address<Virtual>,
    pub attribute_fields: AttributeFields,
}

/// Fixed-capacity store of mapping entries. Adding beyond 12 entries returns an error.
struct MappingRecord {
    inner: [Option<MappingRecordEntry>; 12],
}

//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------

/// The global, lock-protected record of kernel mappings.
static KERNEL_MAPPING_RECORD: InitStateLock<MappingRecord> =
    InitStateLock::new(MappingRecord::new());
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
impl MappingRecordEntry {
    /// Create a new entry with `name` registered as the first and only user.
    pub fn new(
        name: &'static str,
        virt_pages: &PageSliceDescriptor<Virtual>,
        phys_pages: &PageSliceDescriptor<Physical>,
        attr: &AttributeFields,
    ) -> Self {
        let mut users = [None; 5];
        users[0] = Some(name);

        Self {
            users,
            phys_pages: *phys_pages,
            virt_start_addr: virt_pages.start_addr(),
            attribute_fields: *attr,
        }
    }

    /// Return a mutable reference to the first unoccupied user slot, if any.
    fn find_next_free_user(&mut self) -> Result<&mut Option<&'static str>, &'static str> {
        self.users
            .iter_mut()
            .find(|slot| slot.is_none())
            .ok_or("Storage for user info exhausted")
    }

    /// Register an additional user of this mapping.
    pub fn add_user(&mut self, user: &'static str) -> Result<(), &'static str> {
        let free_slot = self.find_next_free_user()?;
        *free_slot = Some(user);

        Ok(())
    }
}
impl MappingRecord {
    /// Create an empty record.
    pub const fn new() -> Self {
        Self { inner: [None; 12] }
    }

    /// Return a mutable reference to the first unoccupied entry slot, if any.
    fn find_next_free(&mut self) -> Result<&mut Option<MappingRecordEntry>, &'static str> {
        if let Some(x) = self.inner.iter_mut().find(|x| x.is_none()) {
            return Ok(x);
        }

        Err("Storage for mapping info exhausted")
    }

    /// Find an existing device-memory entry that maps exactly the same physical page slice.
    fn find_duplicate(
        &mut self,
        phys_pages: &PageSliceDescriptor<Physical>,
    ) -> Option<&mut MappingRecordEntry> {
        self.inner
            .iter_mut()
            .filter(|x| x.is_some())
            .map(|x| x.as_mut().unwrap())
            // Only device-memory (MMIO) mappings are eligible for reuse.
            .filter(|x| x.attribute_fields.mem_attributes == MemAttributes::Device)
            .find(|x| x.phys_pages == *phys_pages)
    }

    /// Add a new entry. Errors if all slots are occupied.
    pub fn add(
        &mut self,
        name: &'static str,
        virt_pages: &PageSliceDescriptor<Virtual>,
        phys_pages: &PageSliceDescriptor<Physical>,
        attr: &AttributeFields,
    ) -> Result<(), &'static str> {
        let x = self.find_next_free()?;

        *x = Some(MappingRecordEntry::new(name, virt_pages, phys_pages, attr));
        Ok(())
    }

    /// Pretty-print all occupied entries as a table.
    pub fn print(&self) {
        const KIB_RSHIFT: u32 = 10; // log2(1024).
        const MIB_RSHIFT: u32 = 20; // log2(1024 * 1024).

        info!("      -------------------------------------------------------------------------------------------------------------------------------------------");
        info!(
            "      {:^44}     {:^30}   {:^7}   {:^9} {:^35}",
            "Virtual", "Physical", "Size", "Attr", "Entity"
        );
        info!("      -------------------------------------------------------------------------------------------------------------------------------------------");

        for i in self
            .inner
            .iter()
            .filter(|x| x.is_some())
            .map(|x| x.unwrap())
        {
            let virt_start = i.virt_start_addr;
            let virt_end_inclusive = virt_start + i.phys_pages.size() - 1;
            let phys_start = i.phys_pages.start_addr();
            let phys_end_inclusive = i.phys_pages.end_addr_inclusive();
            let size = i.phys_pages.size();

            // Scale the size to the largest unit that yields a non-zero integer value.
            let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
                (size >> MIB_RSHIFT, "MiB")
            } else if (size >> KIB_RSHIFT) > 0 {
                (size >> KIB_RSHIFT, "KiB")
            } else {
                (size, "Byte")
            };

            let attr = match i.attribute_fields.mem_attributes {
                MemAttributes::CacheableDRAM => "C",
                MemAttributes::Device => "Dev",
            };

            let acc_p = match i.attribute_fields.acc_perms {
                AccessPermissions::ReadOnly => "RO",
                AccessPermissions::ReadWrite => "RW",
            };

            let xn = if i.attribute_fields.execute_never {
                "XN"
            } else {
                "X"
            };

            info!(
                "      {}..{} --> {}..{} | \
                {: >3} {} | {: <3} {} {: <2} | {}",
                virt_start,
                virt_end_inclusive,
                phys_start,
                phys_end_inclusive,
                size,
                unit,
                attr,
                acc_p,
                xn,
                i.users[0].unwrap()
            );

            // Any additional users of the same mapping go on continuation lines.
            for k in i.users[1..].iter() {
                if let Some(additional_user) = *k {
                    info!(
                        "                                                                                                            | {}",
                        additional_user
                    );
                }
            }
        }

        info!("      -------------------------------------------------------------------------------------------------------------------------------------------");
    }
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
use synchronization::interface::ReadWriteEx;
/// Add an entry to the mapping info record.
///
/// Errors if the record's fixed-size storage is exhausted.
pub fn kernel_add(
    name: &'static str,
    virt_pages: &PageSliceDescriptor<Virtual>,
    phys_pages: &PageSliceDescriptor<Physical>,
    attr: &AttributeFields,
) -> Result<(), &'static str> {
    KERNEL_MAPPING_RECORD.write(|mr| mr.add(name, virt_pages, phys_pages, attr))
}
/// Try to find an existing MMIO mapping of the same physical page slice and register `new_user`
/// as an additional user of it.
///
/// Returns the virtual start address of the existing mapping on success, `None` if there is no
/// duplicate. Failure to register the additional user is only logged as a warning, because the
/// mapping itself is still valid and usable.
pub fn kernel_find_and_insert_mmio_duplicate(
    mmio_descriptor: &MMIODescriptor,
    new_user: &'static str,
) -> Option<Address<Virtual>> {
    // MMIODescriptor is Copy, so a plain dereference replaces the redundant clone().
    let phys_pages: PageSliceDescriptor<Physical> = (*mmio_descriptor).into();

    KERNEL_MAPPING_RECORD.write(|mr| {
        let dup = mr.find_duplicate(&phys_pages)?;

        if let Err(x) = dup.add_user(new_user) {
            warn!("{}", x);
        }

        Some(dup.virt_start_addr)
    })
}
/// Human-readable print of all recorded kernel mappings.
///
/// Holds the record's read lock for the duration of the print.
pub fn kernel_print() {
    KERNEL_MAPPING_RECORD.read(|mr| mr.print());
}

@ -0,0 +1,109 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
//! Translation table.
#[cfg(target_arch = "aarch64")]
#[path = "../../_arch/aarch64/memory/mmu/translation_table.rs"]
mod arch_translation_table;
use crate::memory::{
mmu::{AttributeFields, PageSliceDescriptor},
Physical, Virtual,
};
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
#[cfg(target_arch = "aarch64")]
pub use arch_translation_table::FixedSizeTranslationTable;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Translation table interfaces.
pub mod interface {
    use super::*;

    /// Translation table operations.
    pub trait TranslationTable {
        /// Anything that needs to run before any of the other provided functions can be used.
        ///
        /// # Safety
        ///
        /// - Implementor must ensure that this function can run only once or is harmless if invoked
        ///   multiple times.
        fn init(&mut self) -> Result<(), &'static str>;

        /// Map the given virtual pages to the given physical pages.
        ///
        /// # Safety
        ///
        /// - Using wrong attributes can cause multiple issues of different nature in the system.
        /// - It is not required that the architectural implementation prevents aliasing. That is,
        ///   mapping to the same physical memory using multiple virtual addresses, which would
        ///   break Rust's ownership assumptions. This should be protected against in the kernel's
        ///   generic MMU code.
        unsafe fn map_pages_at(
            &mut self,
            virt_pages: &PageSliceDescriptor<Virtual>,
            phys_pages: &PageSliceDescriptor<Physical>,
            attr: &AttributeFields,
        ) -> Result<(), &'static str>;

        /// Obtain a free virtual page slice in the MMIO region.
        ///
        /// The "MMIO region" is a distinct region of the implementor's choice, which allows
        /// differentiating MMIO addresses from others. This can speed up debugging efforts.
        /// Ideally, those MMIO addresses are also standing out visually so that a human eye can
        /// identify them. For example, by allocating them from near the end of the virtual address
        /// space.
        fn next_mmio_virt_page_slice(
            &mut self,
            num_pages: usize,
        ) -> Result<PageSliceDescriptor<Virtual>, &'static str>;

        /// Check if a virtual page slice is in the "MMIO region".
        fn is_virt_page_slice_mmio(&self, virt_pages: &PageSliceDescriptor<Virtual>) -> bool;
    }
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{bsp, memory::Address};
    use arch_translation_table::MinSizeTranslationTable;
    use interface::TranslationTable;
    use test_macros::kernel_test;

    /// Sanity checks for the TranslationTable implementation.
    #[kernel_test]
    fn translationtable_implementation_sanity() {
        // This will occupy a lot of space on the stack.
        let mut tables = MinSizeTranslationTable::new_for_runtime();

        assert!(tables.init().is_ok());

        // Requesting zero pages must fail.
        let x = tables.next_mmio_virt_page_slice(0);
        assert!(x.is_err());

        // Requesting an absurdly large number of pages must fail as well.
        let x = tables.next_mmio_virt_page_slice(100_000_000);
        assert!(x.is_err());

        // A sane request must succeed and describe a slice of the requested size.
        let x = tables.next_mmio_virt_page_slice(2).unwrap();
        assert_eq!(x.size(), bsp::memory::mmu::KernelGranule::SIZE * 2);

        // The returned slice must be in the MMIO region; an arbitrary low address must not.
        assert!(tables.is_virt_page_slice_mmio(&x));
        assert!(
            !tables.is_virt_page_slice_mmio(&PageSliceDescriptor::from_addr(Address::new(0), 1))
        );
    }
}

@ -0,0 +1,217 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit types.
use crate::{
bsp, common,
memory::{Address, AddressType, Physical, Virtual},
};
use core::{
convert::{From, TryFrom},
marker::PhantomData,
};
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Generic page type.
///
/// Its size equals the kernel's translation granule size (checked by a unit test below).
#[repr(C)]
pub struct Page<ATYPE: AddressType> {
    // Raw page storage.
    inner: [u8; bsp::memory::mmu::KernelGranule::SIZE],
    // Zero-sized marker tagging the page as physical or virtual.
    _address_type: PhantomData<ATYPE>,
}

/// Type describing a slice of pages.
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub struct PageSliceDescriptor<ATYPE: AddressType> {
    // Start address of the slice. Must be aligned to the kernel granule size (see `from_addr`).
    start: Address<ATYPE>,
    // Number of pages in the slice. Always > 0 (see `from_addr`).
    num_pages: usize,
}

/// Architecture agnostic memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone, PartialOrd, PartialEq)]
pub enum MemAttributes {
    CacheableDRAM,
    Device,
}

/// Architecture agnostic access permissions.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
pub enum AccessPermissions {
    ReadOnly,
    ReadWrite,
}

/// Collection of memory attributes.
#[allow(missing_docs)]
#[derive(Copy, Clone)]
pub struct AttributeFields {
    pub mem_attributes: MemAttributes,
    pub acc_perms: AccessPermissions,
    pub execute_never: bool,
}

/// An MMIO descriptor for use in device drivers.
///
/// Start address and size need not be page-aligned; conversion into a
/// `PageSliceDescriptor<Physical>` rounds outward to whole pages.
#[derive(Copy, Clone)]
pub struct MMIODescriptor {
    start_addr: Address<Physical>,
    size: usize,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Page
//------------------------------------------------------------------------------
impl<ATYPE: AddressType> Page<ATYPE> {
    /// Get a pointer to the instance.
    pub const fn as_ptr(&self) -> *const Page<ATYPE> {
        self as *const _
    }
}

//------------------------------------------------------------------------------
// PageSliceDescriptor
//------------------------------------------------------------------------------

impl<ATYPE: AddressType> PageSliceDescriptor<ATYPE> {
    /// Create an instance.
    ///
    /// Panics if `start` is not aligned to the kernel granule size, or if `num_pages` is zero.
    pub const fn from_addr(start: Address<ATYPE>, num_pages: usize) -> Self {
        assert!(common::is_aligned(
            start.into_usize(),
            bsp::memory::mmu::KernelGranule::SIZE
        ));
        assert!(num_pages > 0);

        Self { start, num_pages }
    }

    /// Return a pointer to the first page of the described slice.
    const fn first_page_ptr(&self) -> *const Page<ATYPE> {
        self.start.into_usize() as *const _
    }

    /// Return the number of Pages the slice describes.
    pub const fn num_pages(&self) -> usize {
        self.num_pages
    }

    /// Return the memory size this descriptor spans.
    pub const fn size(&self) -> usize {
        self.num_pages * bsp::memory::mmu::KernelGranule::SIZE
    }

    /// Return the start address.
    pub const fn start_addr(&self) -> Address<ATYPE> {
        self.start
    }

    /// Return the exclusive end address.
    pub fn end_addr(&self) -> Address<ATYPE> {
        self.start + self.size()
    }

    /// Return the inclusive end address.
    pub fn end_addr_inclusive(&self) -> Address<ATYPE> {
        self.start + (self.size() - 1)
    }

    /// Check if an address is contained within this descriptor.
    pub fn contains(&self, addr: Address<ATYPE>) -> bool {
        (addr >= self.start_addr()) && (addr <= self.end_addr_inclusive())
    }

    /// Return a non-mutable slice of Pages.
    ///
    /// # Safety
    ///
    /// - Same as applies for `core::slice::from_raw_parts`.
    pub unsafe fn as_slice(&self) -> &[Page<ATYPE>] {
        core::slice::from_raw_parts(self.first_page_ptr(), self.num_pages)
    }
}
impl TryFrom<PageSliceDescriptor<Virtual>> for PageSliceDescriptor<Physical> {
    type Error = super::TranslationError;

    /// Translate a virtual page slice into its physical counterpart.
    ///
    /// Fails if there is no valid mapping for the slice's start address.
    fn try_from(desc: PageSliceDescriptor<Virtual>) -> Result<Self, Self::Error> {
        // Only the start address is translated; the page count carries over unchanged.
        // NOTE(review): this assumes the slice is physically contiguous — confirm that callers
        // guarantee this.
        let phys_start = super::try_virt_to_phys(desc.start)?;

        Ok(Self {
            start: phys_start,
            num_pages: desc.num_pages,
        })
    }
}
impl From<MMIODescriptor> for PageSliceDescriptor<Physical> {
    /// Round the described MMIO region outward to whole pages.
    fn from(desc: MMIODescriptor) -> Self {
        // Align the region's start down to the page boundary it falls into.
        let first_page_addr = desc
            .start_addr
            .align_down(bsp::memory::mmu::KernelGranule::SIZE);

        // Number of pages needed to cover everything up to and including the last byte.
        let covered = desc.end_addr_inclusive().into_usize() - first_page_addr.into_usize();
        let num_pages = (covered >> bsp::memory::mmu::KernelGranule::SHIFT) + 1;

        Self {
            start: first_page_addr,
            num_pages,
        }
    }
}
//------------------------------------------------------------------------------
// MMIODescriptor
//------------------------------------------------------------------------------
impl MMIODescriptor {
    /// Create an instance.
    ///
    /// Panics if `size` is zero.
    pub const fn new(start_addr: Address<Physical>, size: usize) -> Self {
        assert!(size > 0);

        Self { start_addr, size }
    }

    /// Return the start address.
    pub const fn start_addr(&self) -> Address<Physical> {
        self.start_addr
    }

    /// Return the inclusive end address.
    pub fn end_addr_inclusive(&self) -> Address<Physical> {
        self.start_addr + (self.size - 1)
    }

    /// Return the size.
    pub const fn size(&self) -> usize {
        self.size
    }
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use test_macros::kernel_test;

    /// Check if the size of `struct Page` is as expected.
    ///
    /// `Page` must cover exactly one kernel translation granule.
    #[kernel_test]
    fn size_of_page_equals_granule_size() {
        assert_eq!(
            core::mem::size_of::<Page<Physical>>(),
            bsp::memory::mmu::KernelGranule::SIZE
        );
    }
}

@ -0,0 +1,58 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! A panic handler that infinitely waits.
use crate::{bsp, cpu, exception};
use core::{fmt, panic::PanicInfo};
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Print to the panic console.
///
/// NOTE(review): `panic_console_out()` presumably returns a console that is usable without
/// taking the regular console's lock — confirm in `bsp::console`.
fn _panic_print(args: fmt::Arguments) {
    use fmt::Write;

    unsafe { bsp::console::panic_console_out().write_fmt(args).unwrap() };
}

/// The point of exit for `libkernel`.
///
/// It is linked weakly, so that the integration tests can overload its standard behavior.
#[linkage = "weak"]
#[no_mangle]
fn _panic_exit() -> ! {
    // Regular builds park the core forever; test builds report failure to the QEMU harness.
    #[cfg(not(test_build))]
    {
        cpu::wait_forever()
    }
    #[cfg(test_build)]
    {
        cpu::qemu_exit_failure()
    }
}
/// Prints with a newline - only use from the panic handler.
///
/// Carbon copy from <https://doc.rust-lang.org/src/std/macros.rs.html>
///
/// NOTE(review): the expansion calls `_panic_print` unqualified, so despite `#[macro_export]`
/// this macro only resolves from within this module — confirm whether a `$crate::` path is
/// intended.
#[macro_export]
macro_rules! panic_println {
    ($($arg:tt)*) => ({
        _panic_print(format_args_nl!($($arg)*));
    })
}

/// The kernel's panic handler: print the panic message, then exit via `_panic_exit()`.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    // Mask IRQs before printing.
    unsafe { exception::asynchronous::local_irq_mask() };

    if let Some(args) = info.message() {
        panic_println!("\nKernel panic: {}", args);
    } else {
        panic_println!("\nKernel panic!");
    }

    _panic_exit()
}

@ -0,0 +1,106 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Printing.
use crate::{bsp, console};
use core::fmt;
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
// Low-level print function backing the `print!`/`println!`/`info!`/`warn!` macros.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    use console::interface::Write;

    bsp::console::console().write_fmt(args).unwrap();
}

/// Prints without a newline.
///
/// Carbon copy from <https://doc.rust-lang.org/src/std/macros.rs.html>
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::print::_print(format_args!($($arg)*)));
}

/// Prints with a newline.
///
/// Carbon copy from <https://doc.rust-lang.org/src/std/macros.rs.html>
#[macro_export]
macro_rules! println {
    () => ($crate::print!("\n"));
    ($($arg:tt)*) => ({
        $crate::print::_print(format_args_nl!($($arg)*));
    })
}

/// Prints an info, with a newline.
///
/// The message is prefixed with an uptime timestamp in `seconds.micros` form.
///
/// NOTE(review): the expansion contains `use crate::time::...`, which resolves relative to the
/// *calling* crate — confirm whether `$crate::` is intended so the macro also works when used
/// from outside this crate.
#[macro_export]
macro_rules! info {
    ($string:expr) => ({
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[  {:>3}.{:03}{:03}] ", $string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000
        ));
    });
    ($format_string:expr, $($arg:tt)*) => ({
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[  {:>3}.{:03}{:03}] ", $format_string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000,
            $($arg)*
        ));
    })
}

/// Prints a warning, with a newline.
///
/// Same format as `info!`, but with a `W` marker in the timestamp prefix.
#[macro_export]
macro_rules! warn {
    ($string:expr) => ({
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[W {:>3}.{:03}{:03}] ", $string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000
        ));
    });
    ($format_string:expr, $($arg:tt)*) => ({
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[W {:>3}.{:03}{:03}] ", $format_string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000,
            $($arg)*
        ));
    })
}

@ -0,0 +1,41 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2021 Andre Richter <andre.o.richter@gmail.com>
//! Rust runtime initialization code.
use crate::{bsp, memory};
//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------
/// Zero out the .bss section.
///
/// # Safety
///
/// - Must only be called pre `kernel_init()`.
#[inline(always)]
unsafe fn zero_bss() {
    memory::zero_volatile(bsp::memory::bss_range_inclusive());
}

/// Equivalent to `crt0` or `c0` code in C/C++ world. Clears the `bss` section, then jumps to kernel
/// init code.
///
/// # Safety
///
/// - Only a single core must be active and running this function.
#[no_mangle]
pub unsafe fn runtime_init() -> ! {
    // Forward declaration of the kernel's init entry point, defined elsewhere.
    extern "Rust" {
        fn kernel_init() -> !;
    }

    // Zero .bss before handing over to `kernel_init()`.
    zero_bss();

    kernel_init()
}

@ -0,0 +1,92 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! State information about the kernel itself.
use core::sync::atomic::{AtomicU8, Ordering};
//--------------------------------------------------------------------------------------------------
// Private Definitions
//--------------------------------------------------------------------------------------------------
/// Different stages in the kernel execution.
#[derive(Copy, Clone, Eq, PartialEq)]
enum State {
    /// The kernel starts booting in this state.
    Init,

    /// The kernel transitions to this state when jumping to `kernel_main()` (at the end of
    /// `kernel_init()`, after all init calls are done).
    SingleCoreMain,

    /// The kernel transitions to this state when it boots the secondary cores, aka switches
    /// execution mode to symmetric multiprocessing (SMP).
    MultiCoreMain,
}
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Maintains the kernel state and state transitions.
///
/// Wraps an `AtomicU8` that holds one of the discriminants defined in `impl StateManager`.
pub struct StateManager(AtomicU8);

//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------

static STATE_MANAGER: StateManager = StateManager::new();

//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------

/// Return a reference to the global StateManager.
pub fn state_manager() -> &'static StateManager {
    &STATE_MANAGER
}
impl StateManager {
    // Raw discriminants stored in the AtomicU8, mapped back to `State` in `state()`.
    const INIT: u8 = 0;
    const SINGLE_CORE_MAIN: u8 = 1;
    const MULTI_CORE_MAIN: u8 = 2;

    /// Create a new instance.
    pub const fn new() -> Self {
        Self(AtomicU8::new(Self::INIT))
    }

    /// Return the current state.
    ///
    /// Panics if the stored value does not correspond to a known state.
    fn state(&self) -> State {
        let state = self.0.load(Ordering::Acquire);

        match state {
            Self::INIT => State::Init,
            Self::SINGLE_CORE_MAIN => State::SingleCoreMain,
            Self::MULTI_CORE_MAIN => State::MultiCoreMain,
            _ => panic!("Invalid KERNEL_STATE"),
        }
    }

    /// Return if the kernel is init state.
    pub fn is_init(&self) -> bool {
        self.state() == State::Init
    }

    /// Transition from Init to SingleCoreMain.
    ///
    /// Panics if the current state is not Init.
    pub fn transition_to_single_core_main(&self) {
        // NOTE(review): success ordering is Acquire, failure is Relaxed — confirm whether a
        // Release/AcqRel success ordering is needed to publish init-phase writes.
        if self
            .0
            .compare_exchange(
                Self::INIT,
                Self::SINGLE_CORE_MAIN,
                Ordering::Acquire,
                Ordering::Relaxed,
            )
            .is_err()
        {
            panic!("transition_to_single_core_main() called while state != Init");
        }
    }
}

@ -0,0 +1,159 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
//!
//! # Resources
//!
//! - <https://doc.rust-lang.org/book/ch16-04-extensible-concurrency-sync-and-send.html>
//! - <https://stackoverflow.com/questions/59428096/understanding-the-send-trait>
//! - <https://doc.rust-lang.org/std/cell/index.html>
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Synchronization interfaces.
pub mod interface {

    /// Any object implementing this trait guarantees exclusive access to the data wrapped within
    /// the Mutex for the duration of the provided closure.
    pub trait Mutex {
        /// The type of the data that is wrapped by this mutex.
        type Data;

        /// Locks the mutex and grants the closure temporary mutable access to the wrapped data.
        ///
        /// Note: takes `&self` — implementors rely on interior mutability.
        fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
    }

    /// A reader-writer exclusion type.
    ///
    /// The implementing object allows either a number of readers or at most one writer at any point
    /// in time.
    pub trait ReadWriteEx {
        /// The type of encapsulated data.
        type Data;

        /// Grants temporary mutable access to the encapsulated data.
        fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R;

        /// Grants temporary immutable access to the encapsulated data.
        fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R;
    }
}
/// A pseudo-lock for teaching purposes.
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access from
/// other cores to the contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing on a single core.
pub struct IRQSafeNullLock<T>
where
    T: ?Sized,
{
    // The protected data. UnsafeCell provides the interior mutability that `lock(&self)` needs.
    data: UnsafeCell<T>,
}

/// A pseudo-lock that is RW during the single-core kernel init phase and RO afterwards.
///
/// Intended to encapsulate data that is populated during kernel init when no concurrency exists.
pub struct InitStateLock<T>
where
    T: ?Sized,
{
    // The protected data.
    data: UnsafeCell<T>,
}
//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------
// Safety: cross-core protection is intentionally absent (see type docs); within a single core,
// `lock()` serializes access by masking IRQs, so only `T: Send` is required.
unsafe impl<T> Send for IRQSafeNullLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for IRQSafeNullLock<T> where T: ?Sized + Send {}

impl<T> IRQSafeNullLock<T> {
    /// Create an instance.
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
        }
    }
}

// Safety: `write()` asserts it is only used during the single-core kernel init phase (see the
// ReadWriteEx impl below), so only `T: Send` is required here as well.
unsafe impl<T> Send for InitStateLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for InitStateLock<T> where T: ?Sized + Send {}

impl<T> InitStateLock<T> {
    /// Create an instance.
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
        }
    }
}
//------------------------------------------------------------------------------
// OS Interface Code
//------------------------------------------------------------------------------
use crate::{exception, state};
impl<T> interface::Mutex for IRQSafeNullLock<T> {
    type Data = T;

    /// Execute the closure with exclusive access to the data, IRQs masked for the duration.
    fn lock<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
        // In a real lock, there would be code encapsulating this line that ensures that this
        // mutable reference will ever only be given out once at a time.
        let data = unsafe { &mut *self.data.get() };

        // Execute the closure while IRQs are masked.
        exception::asynchronous::exec_with_irq_masked(|| f(data))
    }
}
impl<T> interface::ReadWriteEx for InitStateLock<T> {
    type Data = T;

    /// Mutable access is only permitted during the single-core kernel init phase.
    fn write<R>(&self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
        assert!(
            state::state_manager().is_init(),
            "InitStateLock::write called after kernel init phase"
        );
        // NOTE(review): taken literally, this requires `is_local_irq_masked()` to return false,
        // yet the failure message reads "called with IRQs unmasked" — cross-check the semantics
        // of that helper against the message; one of the two looks inverted.
        assert!(
            !exception::asynchronous::is_local_irq_masked(),
            "InitStateLock::write called with IRQs unmasked"
        );

        let data = unsafe { &mut *self.data.get() };

        f(data)
    }

    /// Immutable access; performs no phase check of its own.
    fn read<R>(&self, f: impl FnOnce(&Self::Data) -> R) -> R {
        let data = unsafe { &*self.data.get() };

        f(data)
    }
}
//--------------------------------------------------------------------------------------------------
// Testing
//--------------------------------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use test_macros::kernel_test;

    /// InitStateLock must be transparent.
    ///
    /// That is, it must not add any size overhead over the wrapped type.
    #[kernel_test]
    fn init_state_lock_is_transparent() {
        use core::mem::size_of;

        assert_eq!(size_of::<InitStateLock<u64>>(), size_of::<u64>());
    }
}

@ -0,0 +1,37 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! Timer primitives.
#[cfg(target_arch = "aarch64")]
#[path = "_arch/aarch64/time.rs"]
mod arch_time;
//--------------------------------------------------------------------------------------------------
// Architectural Public Reexports
//--------------------------------------------------------------------------------------------------
pub use arch_time::time_manager;
//--------------------------------------------------------------------------------------------------
// Public Definitions
//--------------------------------------------------------------------------------------------------
/// Timekeeping interfaces.
pub mod interface {
    use core::time::Duration;

    /// Time management functions.
    pub trait TimeManager {
        /// The timer's resolution.
        fn resolution(&self) -> Duration;

        /// The uptime since power-on of the device.
        ///
        /// This includes time consumed by firmware and bootloaders.
        fn uptime(&self) -> Duration;

        /// Spin for a given duration.
        fn spin_for(&self, duration: Duration);
    }
}

@ -0,0 +1,14 @@
# Manifest for the `test-macros` helper crate, which provides the `#[kernel_test]` attribute
# macro used by the kernel's custom test framework.
[package]
name = "test-macros"
version = "0.1.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2018"

[lib]
# Compile this crate as a procedural-macro library.
proc-macro = true

[dependencies]
proc-macro2 = "1.x"
quote = "1.x"
syn = { version = "1.x", features = ["full"] }
test-types = { path = "../test-types" }

@ -0,0 +1,29 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::{parse_macro_input, Ident, ItemFn};
/// Attribute macro that registers a function with the kernel's custom test framework.
///
/// Wraps the annotated function's body and name into a `test_types::UnitTest` constant and tags
/// it with `#[test_case]` so the custom test runner picks it up.
#[proc_macro_attribute]
pub fn kernel_test(_attr: TokenStream, input: TokenStream) -> TokenStream {
    let f = parse_macro_input!(input as ItemFn);

    // `Ident::to_string()` already yields a `String`; wrapping it in `format!("{}", ..)` was
    // redundant.
    let test_name = f.sig.ident.to_string();
    let test_ident = Ident::new(
        &format!("{}_TEST_CONTAINER", test_name.to_uppercase()),
        Span::call_site(),
    );
    let test_code_block = f.block;

    quote!(
        #[test_case]
        const #test_ident: test_types::UnitTest = test_types::UnitTest {
            name: #test_name,
            test_func: || #test_code_block,
        };
    )
    .into()
}

@ -0,0 +1,5 @@
# Manifest for the `test-types` crate, which holds the shared `UnitTest` type used by both the
# kernel and the `test-macros` crate.
[package]
name = "test-types"
version = "0.1.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2018"

@ -0,0 +1,16 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
//! Types for the `custom_test_frameworks` implementation.
#![no_std]
/// Unit test container.
///
/// Instances are generated by the `#[kernel_test]` attribute macro from the `test-macros`
/// crate, which wraps a test function's name and body into this struct.
pub struct UnitTest {
    /// Name of the test.
    pub name: &'static str,

    /// Function pointer to the test.
    pub test_func: fn(),
}

@ -0,0 +1,50 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
require 'expect'
TIMEOUT_SECS = 3
# Verify sending and receiving works as expected.
# Verify sending and receiving works as expected.
class TxRxHandshake
    def name
        'Transmit and Receive handshake'
    end

    # Send 'ABC' to the kernel and wait for the expected 'OK1234' response.
    def run(qemu_out, qemu_in)
        qemu_in.write_nonblock('ABC')
        response = qemu_out.expect('OK1234', TIMEOUT_SECS)
        raise('TX/RX test failed') if response.nil?
    end
end
# Check for correct TX statistics implementation. Depends on test 1 being run first.
# Check for correct TX statistics implementation. Depends on test 1 being run first.
class TxStatistics
    def name
        'Transmit statistics'
    end

    # The kernel prints the number of characters written so far. Test 1 made it transmit
    # exactly 6 characters ('OK1234'), hence the expected value.
    def run(qemu_out, _qemu_in)
        raise('chars_written reported wrong') if qemu_out.expect('6', TIMEOUT_SECS).nil?
    end
end

# Check for correct RX statistics implementation. Depends on test 1 being run first.
class RxStatistics
    def name
        'Receive statistics'
    end

    # The kernel prints the number of characters read so far. Test 1 sent exactly 3
    # characters ('ABC'), hence the expected value.
    def run(qemu_out, _qemu_in)
        raise('chars_read reported wrong') if qemu_out.expect('3', TIMEOUT_SECS).nil?
    end
end
##--------------------------------------------------------------------------------------------------
## Test registration
##--------------------------------------------------------------------------------------------------
# All console sanity subtests, in the order they must run (the statistics tests depend on the
# handshake having run first).
def subtest_collection
    [TxRxHandshake, TxStatistics, RxStatistics].map(&:new)
end

@ -0,0 +1,35 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
//! Console sanity tests - RX, TX and statistics.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
use libkernel::{bsp, console, cpu, exception, print};
// Integration-test entry point: exercises console RX, TX, and the read/write statistics.
#[no_mangle]
unsafe fn kernel_init() -> ! {
    use bsp::console::console;
    use console::interface::*;

    exception::handling_init();
    bsp::console::qemu_bring_up_console();

    // Handshake: the I/O test harness sends "ABC"; answer with the token it waits for.
    assert_eq!(console().read_char(), 'A');
    assert_eq!(console().read_char(), 'B');
    assert_eq!(console().read_char(), 'C');
    print!("OK1234");

    // 6 characters ("OK1234") have been written so far.
    print!("{}", console().chars_written());

    // 3 characters ("ABC") have been read so far.
    print!("{}", console().chars_read());

    // The QEMU process running this test will be closed by the I/O test harness.
    cpu::wait_forever()
}

@ -0,0 +1,49 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
//! Timer sanity tests.
#![feature(custom_test_frameworks)]
#![no_main]
#![no_std]
#![reexport_test_harness_main = "test_main"]
#![test_runner(libkernel::test_runner)]
use core::time::Duration;
use libkernel::{bsp, cpu, exception, time, time::interface::TimeManager};
use test_macros::kernel_test;
/// Kernel entry point for this integration test. Runs the `#[kernel_test]` functions below
/// via the custom test framework's `test_main`, then exits QEMU with a success code.
#[no_mangle]
unsafe fn kernel_init() -> ! {
    exception::handling_init();
    bsp::console::qemu_bring_up_console();

    // Depending on CPU arch, some timer bring-up code could go here. Not needed for the RPi.

    test_main();

    cpu::qemu_exit_success()
}
/// Simple check that the timer is running.
#[kernel_test]
fn timer_is_counting() {
    let uptime_ns = time::time_manager().uptime().as_nanos();

    assert!(uptime_ns > 0)
}
/// Timer resolution must be sufficient.
#[kernel_test]
fn timer_resolution_is_sufficient() {
    let resolution_ns = time::time_manager().resolution().as_nanos();

    assert!(resolution_ns < 100)
}
/// Sanity check spin_for() implementation.
#[kernel_test]
fn spin_accuracy_check_1_second() {
    let before = time::time_manager().uptime();
    time::time_manager().spin_for(Duration::from_secs(1));
    let after = time::time_manager().uptime();

    // A one-second spin must take exactly one whole second of uptime.
    assert_eq!((after - before).as_secs(), 1)
}

@ -0,0 +1,36 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
//! Page faults must result in synchronous exceptions.
#![feature(format_args_nl)]
#![no_main]
#![no_std]
/// Overwrites libkernel's `panic_wait::_panic_exit()` so that it returns a "success" code.
///
/// In this test, reaching the panic is a success, because it is called from the synchronous
/// exception handler, which is what this test wants to achieve.
///
/// It also means that this integration test can not use any other code that calls panic!() directly
/// or indirectly.
mod panic_exit_success;
use libkernel::{bsp, cpu, exception, println};
/// Kernel entry point for this integration test.
///
/// Triggers a page fault by reading from an unmapped address. Success is signalled from the
/// panic path (see the `panic_exit_success` override above); falling through to the end of
/// this function means no fault occurred and the test failed.
#[no_mangle]
unsafe fn kernel_init() -> ! {
    exception::handling_init();
    bsp::console::qemu_bring_up_console();

    println!("Testing synchronous exception handling by causing a page fault");
    println!("-------------------------------------------------------------------\n");

    // Fix: the access below is a read (`read_volatile`), not a write, so say so.
    println!("Reading from address 1 GiB...");
    let big_addr: u64 = 1024 * 1024 * 1024;
    core::ptr::read_volatile(big_addr as *mut u64);

    // If execution reaches here, the memory access above did not cause a page fault exception.
    cpu::qemu_exit_failure()
}

@ -0,0 +1,66 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2020-2021 Andre Richter <andre.o.richter@gmail.com>
//! IRQ handling sanity tests.
#![feature(custom_test_frameworks)]
#![no_main]
#![no_std]
#![reexport_test_harness_main = "test_main"]
#![test_runner(libkernel::test_runner)]
use libkernel::{bsp, cpu, exception};
use test_macros::kernel_test;
/// Kernel entry point for this integration test.
///
/// Unmasks IRQs (the precondition the `#[kernel_test]`s below rely on), then runs them via
/// the custom test framework's `test_main`.
#[no_mangle]
unsafe fn kernel_init() -> ! {
    bsp::console::qemu_bring_up_console();

    exception::handling_init();
    exception::asynchronous::local_irq_unmask();

    test_main();

    cpu::qemu_exit_success()
}
/// Check that IRQ masking works.
///
/// NOTE(review): per the assertions here, `is_local_irq_masked()` evidently returns `true`
/// while IRQs are UNmasked — confirm the function's naming/semantics in libkernel.
#[kernel_test]
fn local_irq_mask_works() {
    // Precondition: IRQs are unmasked (kernel_init unmasked them before test_main).
    assert!(exception::asynchronous::is_local_irq_masked());

    unsafe { exception::asynchronous::local_irq_mask() };
    assert!(!exception::asynchronous::is_local_irq_masked());

    // Restore earlier state.
    unsafe { exception::asynchronous::local_irq_unmask() };
}
/// Check that IRQ unmasking works.
#[kernel_test]
fn local_irq_unmask_works() {
    // Precondition: IRQs are masked.
    unsafe { exception::asynchronous::local_irq_mask() };
    assert!(!exception::asynchronous::is_local_irq_masked());

    // Unmasking must be observable; IRQs stay unmasked for any subsequent test.
    unsafe { exception::asynchronous::local_irq_unmask() };
    assert!(exception::asynchronous::is_local_irq_masked());
}
/// Check that IRQ mask save is saving "something".
#[kernel_test]
fn local_irq_mask_save_works() {
    // Precondition: IRQs are unmasked.
    assert!(exception::asynchronous::is_local_irq_masked());

    let first = unsafe { exception::asynchronous::local_irq_mask_save() };
    assert!(!exception::asynchronous::is_local_irq_masked());

    // Saving again while masked must capture a different state than the first save did.
    let second = unsafe { exception::asynchronous::local_irq_mask_save() };
    assert_ne!(first, second);

    // Restoring the first saved state must unmask IRQs again.
    unsafe { exception::asynchronous::local_irq_restore(first) };
    assert!(exception::asynchronous::is_local_irq_masked());
}

@ -0,0 +1,9 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
/// Overwrites libkernel's `panic_wait::_panic_exit()` with the QEMU-exit version.
///
/// Any panic in the including test binary therefore reports "success" to the test harness.
#[no_mangle]
fn _panic_exit() -> ! {
    libkernel::cpu::qemu_exit_success()
}

@ -0,0 +1,143 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2019-2021 Andre Richter <andre.o.richter@gmail.com>
require 'English'
require 'pty'
# Test base class.
#
# Provides shared pretty-printing helpers and the common exit path. Subclasses set
# @test_name and accumulate QEMU output in @output, then call finish().
class Test
  # Prefix for all printed lines (NOTE(review): width possibly collapsed by formatting —
  # verify against the original file).
  INDENT = ' '

  # Print `status` framed by horizontal rules.
  def print_border(status)
    puts
    puts "#{INDENT}-------------------------------------------------------------------"
    puts status
    puts "#{INDENT}-------------------------------------------------------------------\n\n\n"
  end

  # Announce a failed run with the error message.
  def print_error(error)
    puts
    print_border("#{INDENT}❌ Failure: #{error}: #{@test_name}")
  end

  # Announce a successful run.
  def print_success
    print_border("#{INDENT}✅ Success: #{@test_name}")
  end

  # Dump the accumulated QEMU output, re-indented.
  def print_output
    puts "#{INDENT}-------------------------------------------------------------------"
    print INDENT
    print '🦀 '
    print @output.join.gsub("\n", "\n#{INDENT}")
  end

  # Print the verdict and terminate the process. `error` is falsy on success, otherwise an
  # error message (or `true`); exit(false) maps to a non-zero process exit code.
  def finish(error)
    print_output

    exit_code = if error
                  print_error(error)
                  false
                else
                  print_success
                  true
                end

    exit(exit_code)
  end
end
# Executes tests with console I/O.
class ConsoleTest < Test
  # binary: path to the kernel test binary.
  # qemu_cmd: full QEMU invocation string.
  # test_name: display name derived from the binary.
  # console_subtests: objects responding to #name and #run(qemu_out, qemu_in).
  def initialize(binary, qemu_cmd, test_name, console_subtests)
    super()

    @binary = binary
    @qemu_cmd = qemu_cmd
    @test_name = test_name
    @console_subtests = console_subtests
    @cur_subtest = 1
    @output = ["Running #{@console_subtests.length} console-based tests\n",
               "-------------------------------------------------------------------\n\n"]
  end

  # Right-align the subtest number and pad the name with dots for the results column.
  def format_test_name(number, name)
    formatted_name = "#{number.to_s.rjust(3)}. #{name}"
    formatted_name.ljust(63, '.')
  end

  # Run one subtest against the QEMU PTY; subtests raise on failure.
  def run_subtest(subtest, qemu_out, qemu_in)
    @output << format_test_name(@cur_subtest, subtest.name)

    subtest.run(qemu_out, qemu_in)

    @output << "[ok]\n"
    @cur_subtest += 1
  end

  # Spawn QEMU on a pseudo-terminal, run all subtests, and exit via finish().
  def exec
    error = false

    PTY.spawn(@qemu_cmd) do |qemu_out, qemu_in|
      begin
        @console_subtests.each { |t| run_subtest(t, qemu_out, qemu_in) }
      rescue StandardError => e
        error = e.message
      end

      finish(error)
    end
  end
end
# A wrapper around the bare QEMU invocation.
class RawTest < Test
  # Maximum seconds to wait for new QEMU output before declaring a timeout.
  MAX_WAIT_SECS = 5

  def initialize(binary, qemu_cmd, test_name)
    super()

    @binary = binary
    @qemu_cmd = qemu_cmd
    @test_name = test_name
    @output = []
  end

  # Run QEMU, collect its output, and judge the run purely by its process exit status.
  def exec
    error = 'Timed out waiting for test'
    io = IO.popen(@qemu_cmd)

    while IO.select([io], nil, nil, MAX_WAIT_SECS)
      begin
        @output << io.read_nonblock(1024)
      rescue EOFError
        io.close
        # false (== success) when QEMU exited with status 0, true otherwise.
        error = $CHILD_STATUS.to_i != 0
        break
      end
    end

    finish(error)
  end
end
##--------------------------------------------------------------------------------------------------
## Script entry point
##--------------------------------------------------------------------------------------------------
binary = ARGV.last
test_name = binary.gsub(%r{.*deps/}, '').split('-')[0]
console_test_file = "tests/#{test_name}.rb"
qemu_cmd = ARGV.join(' ')

# Use a console-I/O test when a matching tests/<name>.rb exists; otherwise judge the run by
# QEMU's exit code alone.
test_runner = if File.exist?(console_test_file)
                load console_test_file
                # subtest_collection is provided by console_test_file
                ConsoleTest.new(binary, qemu_cmd, test_name, subtest_collection)
              else
                RawTest.new(binary, qemu_cmd, test_name)
              end

test_runner.exec

@ -0,0 +1,323 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
# Bitfield manipulation.
#
# Subclasses declare named bit ranges via attr_bitfield, which generates a validating writer
# for each range. The accumulated raw value is read back with to_i.
class BitField
  def initialize
    @value = 0
  end

  # Generate a writer `name=` for a field of `num_bits` bits starting at bit `offset`.
  # The writer raises when the input does not fit the field.
  def self.attr_bitfield(name, offset, num_bits)
    define_method("#{name}=") do |bits|
      mask = 2**num_bits - 1

      raise "Input out of range: #{name} = 0x#{bits.to_s(16)}" if (bits & ~mask).positive?

      # Clear the field, then merge in the new bits.
      cleared = @value & ~(mask << offset)
      @value = cleared | (bits << offset)
    end
  end

  # The raw descriptor value.
  def to_i
    @value
  end

  # Descriptors are always 8 bytes (64 bit).
  def size_in_byte
    8
  end
end
# An array class that knows its memory location.
class CArray < Array
  # Physical address of the first element.
  attr_reader :phys_start_addr

  def initialize(phys_start_addr, size, &block)
    @phys_start_addr = phys_start_addr

    super(size, &block)
  end

  # Total byte size: the sum of each element's size_in_byte.
  def size_in_byte
    sum(&:size_in_byte)
  end
end
#---------------------------------------------------------------------------------------------------
# Arch::
#---------------------------------------------------------------------------------------------------
module Arch
FALSE = 0b0
TRUE = 0b1
#---------------------------------------------------------------------------------------------------
# Arch::ARMv8
#---------------------------------------------------------------------------------------------------
module ARMv8
# ARMv8 Table Descriptor.
class Stage1TableDescriptor < BitField
  # Bits [47:16]: next-level table address, stored as a 64 KiB-granule frame number.
  module NextLevelTableAddr
    OFFSET = 16
    NUMBITS = 32
  end

  # Bit [1]: descriptor type.
  module Type
    OFFSET = 1
    NUMBITS = 1

    BLOCK = 0
    TABLE = 1
  end

  # Bit [0]: descriptor is valid.
  module Valid
    OFFSET = 0
    NUMBITS = 1
  end

  attr_bitfield(:__next_level_table_addr, NextLevelTableAddr::OFFSET, NextLevelTableAddr::NUMBITS)
  attr_bitfield(:type, Type::OFFSET, Type::NUMBITS)
  attr_bitfield(:valid, Valid::OFFSET, Valid::NUMBITS)

  # Accepts a full physical address; converts it to a frame number internally.
  def next_level_table_addr=(addr)
    addr = addr >> Granule64KiB::SHIFT
    self.__next_level_table_addr = addr
  end
  private :__next_level_table_addr=
end
# ARMv8 level 3 page descriptor.
class Stage1PageDescriptor < BitField
  # Bit [54]: Unprivileged eXecute Never.
  module UXN
    OFFSET = 54
    NUMBITS = 1
  end

  # Bit [53]: Privileged eXecute Never.
  module PXN
    OFFSET = 53
    NUMBITS = 1
  end

  # Bits [47:16]: output address, stored as a 64 KiB-granule frame number.
  module OutputAddr
    OFFSET = 16
    NUMBITS = 32
  end

  # Bit [10]: Access Flag.
  module AF
    OFFSET = 10
    NUMBITS = 1
  end

  # Bits [9:8]: Shareability.
  module SH
    OFFSET = 8
    NUMBITS = 2

    INNER_SHAREABLE = 0b11
  end

  # Bits [7:6]: Access Permissions.
  module AP
    OFFSET = 6
    NUMBITS = 2

    RW_EL1 = 0b00
    RO_EL1 = 0b10
  end

  # Bits [4:2]: MAIR attribute index.
  module AttrIndx
    OFFSET = 2
    NUMBITS = 3
  end

  # Bit [1]: descriptor type.
  module Type
    OFFSET = 1
    NUMBITS = 1

    RESERVED_INVALID = 0
    PAGE = 1
  end

  # Bit [0]: descriptor is valid.
  module Valid
    OFFSET = 0
    NUMBITS = 1
  end

  attr_bitfield(:uxn, UXN::OFFSET, UXN::NUMBITS)
  attr_bitfield(:pxn, PXN::OFFSET, PXN::NUMBITS)
  attr_bitfield(:__output_addr, OutputAddr::OFFSET, OutputAddr::NUMBITS)
  attr_bitfield(:af, AF::OFFSET, AF::NUMBITS)
  attr_bitfield(:sh, SH::OFFSET, SH::NUMBITS)
  attr_bitfield(:ap, AP::OFFSET, AP::NUMBITS)
  attr_bitfield(:attr_indx, AttrIndx::OFFSET, AttrIndx::NUMBITS)
  attr_bitfield(:type, Type::OFFSET, Type::NUMBITS)
  attr_bitfield(:valid, Valid::OFFSET, Valid::NUMBITS)

  # Accepts a full physical address; converts it to a frame number internally.
  def output_addr=(addr)
    addr = addr >> Granule64KiB::SHIFT
    self.__output_addr = addr
  end
  private :__output_addr=
end
# Translation table representing the structure defined in translation_table.rs.
class TranslationTable
  # Upmost bytes of the virtual address space, reserved for runtime remapping of MMIO.
  MMIO_APERTURE_MiB = 256 * 1024 * 1024

  # MAIR attribute indexes.
  module MAIR
    NORMAL = 1
  end

  def initialize
    @virt_mmio_start_addr = (BSP.kernel_virt_addr_space_size - MMIO_APERTURE_MiB) +
                            BSP.kernel_virt_start_addr

    do_sanity_checks

    num_lvl2_tables = BSP.kernel_virt_addr_space_size >> Granule512MiB::SHIFT

    @lvl3 = new_lvl3(num_lvl2_tables, BSP.phys_table_struct_start_addr)

    @lvl2_phys_start_addr = @lvl3.phys_start_addr + @lvl3.size_in_byte
    @lvl2 = new_lvl2(num_lvl2_tables, @lvl2_phys_start_addr)

    populate_lvl2_entries
  end

  # Map virtual pages to physical pages with the given AttributeFields. Both page arrays
  # must have the same length; physical pages must lie inside the physical address space.
  def map_pages_at(virt_pages, phys_pages, attributes)
    return if virt_pages.empty?

    raise if virt_pages.size != phys_pages.size
    raise if phys_pages.last > BSP.phys_addr_space_end_page

    virt_pages.zip(phys_pages).each do |virt_page, phys_page|
      desc = page_descriptor_from(virt_page)
      set_lvl3_entry(desc, phys_page, attributes)
    end
  end

  # All descriptors as a little-endian binary blob, lvl3 tables first, then lvl2.
  def to_binary
    data = @lvl3.flatten.map(&:to_i) + @lvl2.map(&:to_i)

    data.pack('Q<*') # "Q" == uint64_t, "<" == little endian
  end

  # The lvl2 base address, encoded for patching into the kernel ELF.
  def phys_tables_base_addr_binary
    [@lvl2_phys_start_addr].pack('Q<*') # "Q" == uint64_t, "<" == little endian
  end

  def phys_tables_base_addr
    @lvl2_phys_start_addr
  end

  private

  def binary_with_mmio_clash?
    BSP.rw_end_exclusive >= @virt_mmio_start_addr
  end

  def do_sanity_checks
    raise unless BSP.kernel_granule::SIZE == Granule64KiB::SIZE
    raise unless (BSP.kernel_virt_addr_space_size % Granule512MiB::SIZE).zero?

    # Need to ensure that the kernel binary does not clash with the upmost 256 MiB of the
    # virtual address space, which is reserved for runtime-remapping of MMIO.
    return unless binary_with_mmio_clash?

    # Fix: the BSP exposes rw_end_exclusive; there is no data_end_exclusive accessor, so the
    # previous `BSP.data_end_exclusive` would have raised NoMethodError on this error path.
    puts format('__rw_end_exclusive: 0x%16x', BSP.rw_end_exclusive)
    puts format('MMIO start: 0x%16x', @virt_mmio_start_addr)

    raise 'Kernel virtual addresses clash with 256 MiB MMIO window'
  end

  # One 8192-entry lvl3 table per lvl2 entry, laid out consecutively from start_addr.
  def new_lvl3(num_lvl2_tables, start_addr)
    CArray.new(start_addr, num_lvl2_tables) do
      temp = CArray.new(start_addr, 8192) do
        Stage1PageDescriptor.new
      end
      start_addr += temp.size_in_byte

      temp
    end
  end

  def new_lvl2(num_lvl2_tables, start_addr)
    CArray.new(start_addr, num_lvl2_tables) do
      Stage1TableDescriptor.new
    end
  end

  # Point every lvl2 entry at its lvl3 table and mark it valid.
  def populate_lvl2_entries
    @lvl2.each_with_index do |descriptor, i|
      descriptor.next_level_table_addr = @lvl3[i].phys_start_addr
      descriptor.type = Stage1TableDescriptor::Type::TABLE
      descriptor.valid = TRUE
    end
  end

  # Translate a kernel virtual address into its (lvl2 index, lvl3 index) pair.
  def lvl2_lvl3_index_from(addr)
    addr -= BSP.kernel_virt_start_addr

    lvl2_index = addr >> Granule512MiB::SHIFT
    lvl3_index = (addr & Granule512MiB::MASK) >> Granule64KiB::SHIFT

    raise unless lvl2_index < @lvl2.size

    [lvl2_index, lvl3_index]
  end

  def page_descriptor_from(virt_addr)
    lvl2_index, lvl3_index = lvl2_lvl3_index_from(virt_addr)

    @lvl3[lvl2_index][lvl3_index]
  end

  # rubocop:disable Metrics/MethodLength
  def set_attributes(desc, attributes)
    case attributes.mem_attributes
    when :CacheableDRAM
      desc.sh = Stage1PageDescriptor::SH::INNER_SHAREABLE
      desc.attr_indx = MAIR::NORMAL
    else
      raise 'Invalid input'
    end

    desc.ap = case attributes.acc_perms
              when :ReadOnly
                Stage1PageDescriptor::AP::RO_EL1
              when :ReadWrite
                Stage1PageDescriptor::AP::RW_EL1
              else
                raise 'Invalid input'
              end

    desc.pxn = case attributes.execute_never
               when :XN
                 TRUE
               when :X
                 FALSE
               else
                 raise 'Invalid input'
               end

    # Unprivileged execution is never allowed from these kernel mappings.
    desc.uxn = TRUE
  end
  # rubocop:enable Metrics/MethodLength

  def set_lvl3_entry(desc, output_addr, attributes)
    desc.output_addr = output_addr
    desc.af = TRUE
    desc.type = Stage1PageDescriptor::Type::PAGE
    desc.valid = TRUE

    set_attributes(desc, attributes)
  end
end
end
end

@ -0,0 +1,177 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
# Raspberry Pi 3 + 4
#
# Extracts addresses and symbols from the kernel ELF (via nm/readelf) and provides the
# BSP-specific inputs for translation table generation.
class RaspberryPi
  attr_reader :kernel_granule, :kernel_virt_addr_space_size, :kernel_virt_start_addr

  NM_BINARY = 'aarch64-none-elf-nm'
  READELF_BINARY = 'aarch64-none-elf-readelf'
  MEMORY_SRC = File.read('src/bsp/raspberrypi/memory.rs').split("\n")

  def initialize(kernel_elf)
    @kernel_granule = Granule64KiB

    # Linker/kernel symbols whose virtual addresses are needed, keyed by logical name.
    @virt_addresses = {
      boot_core_stack_start: /__boot_core_stack_start/,
      boot_core_stack_end_exclusive: /__boot_core_stack_end_exclusive/,
      rx_start: /__rx_start/,
      rx_end_exclusive: /__rx_end_exclusive/,
      rw_start: /__rw_start/,
      rw_end_exclusive: /__rw_end_exclusive/,
      table_struct_start_addr: /bsp::.*::memory::mmu::KERNEL_TABLES/,
      phys_tables_base_addr: /PHYS_KERNEL_TABLES_BASE_ADDR/
    }

    symbols = `#{NM_BINARY} --demangle #{kernel_elf}`.split("\n")
    @kernel_virt_addr_space_size = parse_from_symbols(symbols, /__kernel_virt_addr_space_size/)
    @kernel_virt_start_addr = parse_from_symbols(symbols, /__kernel_virt_start_addr/)
    @virt_addresses = parse_from_symbols(symbols, @virt_addresses)
    @phys_addresses = virt_to_phys(@virt_addresses)
    @descriptors = parse_descriptors
    update_max_descriptor_name_length
    @text_section_offset_in_elf = parse_text_section_offset_in_elf(kernel_elf)
  end

  def rw_end_exclusive
    @virt_addresses[:rw_end_exclusive]
  end

  def phys_table_struct_start_addr
    @phys_addresses[:table_struct_start_addr]
  end

  # File offset of the KERNEL_TABLES struct inside the ELF, for patching.
  def table_struct_offset_in_kernel_elf
    (@virt_addresses[:table_struct_start_addr] - @virt_addresses[:rx_start]) +
      @text_section_offset_in_elf
  end

  def phys_tables_base_addr
    @phys_addresses[:phys_tables_base_addr]
  end

  # File offset of PHYS_KERNEL_TABLES_BASE_ADDR inside the ELF, for patching.
  def phys_tables_base_addr_offset_in_kernel_elf
    (@virt_addresses[:phys_tables_base_addr] - @virt_addresses[:rx_start]) +
      @text_section_offset_in_elf
  end

  # End of the physical address space, parsed out of memory.rs (first END constant for
  # the RPi3, second one for the RPi4).
  def phys_addr_space_end_page
    x = MEMORY_SRC.grep(/pub const END/)
    x = case BSP_TYPE
        when :rpi3
          x[0]
        when :rpi4
          x[1]
        else
          raise
        end
    # NOTE(review): scans decimal digit runs only; a hex constant containing letters a-f in
    # memory.rs would be parsed incorrectly — confirm against memory.rs contents.
    x.scan(/\d+/).join.to_i(16)
  end

  # Print all section descriptors and map them into TRANSLATION_TABLES.
  def kernel_map_binary
    MappingDescriptor.print_header
    @descriptors.each do |i|
      print 'Generating'.rjust(12).green.bold
      print ' '
      puts i.to_s
      TRANSLATION_TABLES.map_pages_at(i.virt_pages, i.phys_pages, i.attributes)
    end
    MappingDescriptor.print_divider
  end

  private

  # Look up a regex (or a Hash of regexes) in the nm symbol list; values are hex addresses.
  def parse_from_symbols(symbols, input)
    case input.class.to_s
    when 'Regexp'
      symbols.grep(input).first.split.first.to_i(16)
    when 'Hash'
      input.transform_values do |val|
        symbols.grep(val).first.split.first.to_i(16)
      end
    else
      raise
    end
  end

  def parse_text_section_offset_in_elf(kernel_elf)
    `#{READELF_BINARY} --sections #{kernel_elf}`.scan(/.text.*/).first.split.last.to_i(16)
  end

  # Kernel virtual addresses are physical addresses offset by __kernel_virt_start_addr.
  def virt_to_phys(input)
    case input.class.to_s
    when 'Integer'
      input - @kernel_virt_start_addr
    when 'Hash'
      input.transform_values do |val|
        val - @kernel_virt_start_addr
      end
    else
      raise
    end
  end

  # Mapping descriptor for code and read-only data (cacheable, read-only, executable).
  def descriptor_ro
    name = 'Code and RO data'
    ro_size = @virt_addresses[:rx_end_exclusive] -
              @virt_addresses[:rx_start]
    virt_ro_pages = PageArray.new(@virt_addresses[:rx_start], ro_size, @kernel_granule::SIZE)
    phys_ro_pages = PageArray.new(@phys_addresses[:rx_start], ro_size, @kernel_granule::SIZE)
    ro_attribues = AttributeFields.new(:CacheableDRAM, :ReadOnly, :X)
    MappingDescriptor.new(name, virt_ro_pages, phys_ro_pages, ro_attribues)
  end

  # Mapping descriptor for data and bss (cacheable, read-write, execute-never).
  def descriptor_data
    name = 'Data and bss'
    data_size = @virt_addresses[:rw_end_exclusive] -
                @virt_addresses[:rw_start]
    virt_data_pages = PageArray.new(@virt_addresses[:rw_start], data_size,
                                    @kernel_granule::SIZE)
    phys_data_pages = PageArray.new(@phys_addresses[:rw_start], data_size,
                                    @kernel_granule::SIZE)
    data_attribues = AttributeFields.new(:CacheableDRAM, :ReadWrite, :XN)
    MappingDescriptor.new(name, virt_data_pages, phys_data_pages, data_attribues)
  end

  # Mapping descriptor for the boot core's stack (cacheable, read-write, execute-never).
  def descriptor_boot_core_stack
    name = 'Boot-core stack'
    boot_core_stack_size = @virt_addresses[:boot_core_stack_end_exclusive] -
                           @virt_addresses[:boot_core_stack_start]
    virt_boot_core_stack_pages = PageArray.new(@virt_addresses[:boot_core_stack_start],
                                               boot_core_stack_size, @kernel_granule::SIZE)
    phys_boot_core_stack_pages = PageArray.new(@phys_addresses[:boot_core_stack_start],
                                               boot_core_stack_size, @kernel_granule::SIZE)
    boot_core_stack_attribues = AttributeFields.new(:CacheableDRAM, :ReadWrite, :XN)
    MappingDescriptor.new(name, virt_boot_core_stack_pages, phys_boot_core_stack_pages,
                          boot_core_stack_attribues)
  end

  def parse_descriptors
    [descriptor_ro, descriptor_data, descriptor_boot_core_stack]
  end

  # Keep MappingDescriptor's printed name column wide enough for all descriptors.
  def update_max_descriptor_name_length
    MappingDescriptor.max_descriptor_name_length = @descriptors.map { |i| i.name.size }.max
  end
end

@ -0,0 +1,125 @@
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
# 64 KiB translation granule constants.
module Granule64KiB
  SIZE = 64 * 1024
  SHIFT = Math.log2(SIZE).to_i
end

# 512 MiB granule constants (one lvl2 entry's coverage).
module Granule512MiB
  SIZE = 512 * 1024 * 1024
  SHIFT = Math.log2(SIZE).to_i
  MASK = SIZE - 1
end
# Monkey-patch Integer with some helper functions.
class Integer
  # True iff self is a positive power of two.
  #
  # Fix: the previous implementation (`self[0].zero?`) only tested the least-significant
  # bit, i.e. it returned true for ANY even number and for 0. All in-file callers pass
  # genuine powers of two (granule sizes), so this change is backward compatible for them.
  def power_of_two?
    positive? && (self & (self - 1)).zero?
  end

  # True iff self is a multiple of `alignment`, which must be a power of two.
  def aligned?(alignment)
    raise unless alignment.power_of_two?

    (self & (alignment - 1)).zero?
  end

  # Hex string with '_' separators every four digits, e.g. 0x1_2345.
  def to_hex_underscore(with_leading_zeros: false)
    fmt = with_leading_zeros ? '%016x' : '%x'
    value = format(fmt, self).to_s.reverse.scan(/.{4}|.+/).join('_').reverse

    format('0x%s', value)
  end
end
# An array where each value is the start address of a Page.
class PageArray < Array
  # start_addr must be granule-aligned; size must be a positive multiple of granule_size.
  def initialize(start_addr, size, granule_size)
    raise unless start_addr.aligned?(granule_size)
    raise unless size.positive?
    raise unless (size % granule_size).zero?

    page_count = size / granule_size
    super(page_count) do |index|
      start_addr + (index * granule_size)
    end
  end
end
# Collection of memory attributes.
class AttributeFields
  attr_reader :mem_attributes, :acc_perms, :execute_never

  # mem_attributes: e.g. :CacheableDRAM; acc_perms: :ReadOnly / :ReadWrite;
  # execute_never: :XN / :X.
  def initialize(mem_attributes, acc_perms, execute_never)
    @mem_attributes, @acc_perms, @execute_never = mem_attributes, acc_perms, execute_never
  end
end
# A container that describes a one- or many-page virt-to-phys mapping.
class MappingDescriptor
  # Widest descriptor name seen so far; used to align the printed table columns.
  @max_descriptor_name_length = 0

  class << self
    attr_accessor :max_descriptor_name_length
  end

  attr_reader :name, :virt_pages, :phys_pages, :attributes

  def initialize(name, virt_pages, phys_pages, attributes)
    @name = name
    @virt_pages = virt_pages
    @phys_pages = phys_pages
    @attributes = attributes
  end

  # One table row: name | start virtual address | size in KiB.
  def to_s
    name = @name.ljust(self.class.max_descriptor_name_length)
    virt_start = @virt_pages.first.to_hex_underscore(with_leading_zeros: true)
    # NOTE(review): 65_536 hard-codes the 64 KiB page size — keep in sync with
    # Granule64KiB::SIZE.
    size = ((@virt_pages.size * 65_536) / 1024).to_s.rjust(3)

    "#{name} | #{virt_start} | #{size} KiB"
  end

  # Horizontal rule sized to the name column plus the fixed-width columns.
  def self.print_divider
    print ' '
    print '-' * max_descriptor_name_length
    puts '----------------------------------'
  end

  # Table header with centered column titles.
  def self.print_header
    print_divider
    print ' '
    print 'Section'.center(max_descriptor_name_length)
    print ' '
    print 'Start Virt Addr'.center(21)
    print ' '
    print 'Size'.center(7)
    puts
    print_divider
  end
end
# Write the precomputed translation tables into the kernel binary, at the file offset of
# the KERNEL_TABLES struct. Relies on the global BSP and TRANSLATION_TABLES objects.
def kernel_patch_tables(kernel_binary)
  print 'Patching'.rjust(12).green.bold
  print ' Kernel table struct at physical '
  puts BSP.phys_table_struct_start_addr.to_hex_underscore

  IO.binwrite(kernel_binary, TRANSLATION_TABLES.to_binary,
              BSP.table_struct_offset_in_kernel_elf)
end
# Write the physical base address of the translation tables into the kernel binary, at the
# file offset of PHYS_KERNEL_TABLES_BASE_ADDR. Relies on the global BSP and
# TRANSLATION_TABLES objects.
def kernel_patch_base_addr(kernel_binary)
  print 'Patching'.rjust(12).green.bold
  print ' Value of kernel table physical base address ('
  print TRANSLATION_TABLES.phys_tables_base_addr.to_hex_underscore
  print ') at physical '
  puts BSP.phys_tables_base_addr.to_hex_underscore

  IO.binwrite(kernel_binary, TRANSLATION_TABLES.phys_tables_base_addr_binary,
              BSP.phys_tables_base_addr_offset_in_kernel_elf)
end

@ -0,0 +1,47 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# SPDX-License-Identifier: MIT OR Apache-2.0
#
# Copyright (c) 2021 Andre Richter <andre.o.richter@gmail.com>
# Positional arguments: <target-triple> <bsp-type> <kernel-elf-path>
TARGET = ARGV[0].split('-').first.to_sym
BSP_TYPE = ARGV[1].to_sym
kernel_elf = ARGV[2]

require 'rubygems'
require 'bundler/setup'
require 'colorize'
require_relative 'generic'
require_relative 'bsp'
require_relative 'arch'

puts
puts 'Precomputing kernel translation tables and patching kernel ELF'.cyan
start = Time.now

# Select the BSP object from the BSP type argument.
BSP = case BSP_TYPE
      when :rpi3, :rpi4
        RaspberryPi.new(kernel_elf)
      else
        raise
      end

# Select the architecture's translation table implementation from the target argument.
TRANSLATION_TABLES = case TARGET
                     when :aarch64
                       Arch::ARMv8::TranslationTable.new
                     else
                       raise
                     end

BSP.kernel_map_binary
kernel_patch_tables(kernel_elf)
kernel_patch_base_addr(kernel_elf)

elapsed = Time.now - start

print 'Finished'.rjust(12).green.bold
puts " in #{elapsed.round(2)}s"
Loading…
Cancel
Save