Add code for tutorial 12

pull/37/head
Andre Richter 5 years ago
parent 404c32bcde
commit b74d678efc

@ -0,0 +1,10 @@
{
"editor.formatOnSave": true,
"rust.features": [
"bsp_rpi3"
],
"rust.all_targets": false,
"editor.rulers": [
100
],
}

@ -0,0 +1,40 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "cortex-a"
version = "2.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4aab2f5271d9bf17a52b34dd99993648132df3dacb79312a33332f2b6ae1d0fd"
dependencies = [
"register",
]
[[package]]
name = "kernel"
version = "0.1.0"
dependencies = [
"cortex-a",
"r0",
"register",
]
[[package]]
name = "r0"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2a38df5b15c8d5c7e8654189744d8e396bddc18ad48041a500ce52d6948941f"
[[package]]
name = "register"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c92f7cc61c67d0f283777ce3c678789788d1b48e8f5e822a257513c16194955"
dependencies = [
"tock-registers",
]
[[package]]
name = "tock-registers"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50af9c49c55cfb4437dd78c1fada3be5d088cbe1bea641db8171283503606a70"

@ -0,0 +1,21 @@
[package]
name = "kernel"
version = "0.1.0"
authors = ["Andre Richter <andre.o.richter@gmail.com>"]
edition = "2018"
[package.metadata.cargo-xbuild]
sysroot_path = "../xbuild_sysroot"
# The features section is used to select the target board.
[features]
default = []
bsp_rpi3 = ["cortex-a", "register"]
bsp_rpi4 = ["cortex-a", "register"]
[dependencies]
r0 = "0.2.*"
# Optional dependencies
cortex-a = { version = "2.8.x", optional = true }
register = { version = "0.4.x", optional = true }

@ -0,0 +1,127 @@
## SPDX-License-Identifier: MIT
##
## Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
# Default to the RPi3
ifndef BSP
BSP = rpi3
endif
# BSP-specific arguments
ifeq ($(BSP),rpi3)
TARGET = aarch64-unknown-none-softfloat
OUTPUT = kernel8.img
QEMU_BINARY = qemu-system-aarch64
QEMU_MACHINE_TYPE = raspi3
QEMU_MISC_ARGS = -serial stdio
OPENOCD_ARG = -f /openocd/tcl/interface/ftdi/olimex-arm-usb-tiny-h.cfg -f /openocd/rpi3.cfg
JTAG_BOOT_IMAGE = jtag_boot_rpi3.img
LINKER_FILE = src/bsp/rpi/link.ld
RUSTC_MISC_ARGS = -C target-cpu=cortex-a53
else ifeq ($(BSP),rpi4)
TARGET = aarch64-unknown-none-softfloat
OUTPUT = kernel8.img
# QEMU_BINARY = qemu-system-aarch64
# QEMU_MACHINE_TYPE =
# QEMU_MISC_ARGS = -serial stdio
OPENOCD_ARG = -f /openocd/tcl/interface/ftdi/olimex-arm-usb-tiny-h.cfg -f /openocd/rpi4.cfg
JTAG_BOOT_IMAGE = jtag_boot_rpi4.img
LINKER_FILE = src/bsp/rpi/link.ld
RUSTC_MISC_ARGS = -C target-cpu=cortex-a72
endif
RUSTFLAGS = -C link-arg=-T$(LINKER_FILE) $(RUSTC_MISC_ARGS)
RUSTFLAGS_PEDANTIC = $(RUSTFLAGS) -D warnings -D missing_docs
SOURCES = $(wildcard **/*.rs) $(wildcard **/*.S) $(wildcard **/*.ld)
XRUSTC_CMD = cargo xrustc \
--target=$(TARGET) \
--features bsp_$(BSP) \
--release
CARGO_OUTPUT = target/$(TARGET)/release/kernel
OBJCOPY_CMD = cargo objcopy \
-- \
--strip-all \
-O binary
CONTAINER_UTILS = rustembedded/osdev-utils
DOCKER_CMD = docker run -it --rm
DOCKER_ARG_CURDIR = -v $(shell pwd):/work -w /work
DOCKER_ARG_TTY = --privileged -v /dev:/dev
DOCKER_ARG_JTAG = -v $(shell pwd)/../X1_JTAG_boot:/jtag
DOCKER_ARG_NET = --network host
DOCKER_EXEC_QEMU = $(QEMU_BINARY) -M $(QEMU_MACHINE_TYPE) $(QEMU_MISC_ARGS) -kernel
DOCKER_EXEC_RASPBOOT = raspbootcom
DOCKER_EXEC_RASPBOOT_DEV = /dev/ttyUSB0
# DOCKER_EXEC_RASPBOOT_DEV = /dev/ttyACM0
.PHONY: all doc qemu chainboot jtagboot openocd gdb gdb-opt0 clippy clean readelf objdump nm
all: clean $(OUTPUT)
$(CARGO_OUTPUT): $(SOURCES)
RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(XRUSTC_CMD)
$(OUTPUT): $(CARGO_OUTPUT)
cp $< .
$(OBJCOPY_CMD) $< $(OUTPUT)
doc:
cargo xdoc --target=$(TARGET) --features bsp_$(BSP) --document-private-items
xdg-open target/$(TARGET)/doc/kernel/index.html
ifeq ($(QEMU_MACHINE_TYPE),)
qemu:
@echo "This board is not yet supported for QEMU."
else
qemu: all
$(DOCKER_CMD) $(DOCKER_ARG_CURDIR) $(CONTAINER_UTILS) \
$(DOCKER_EXEC_QEMU) $(OUTPUT)
endif
chainboot: all
$(DOCKER_CMD) $(DOCKER_ARG_CURDIR) $(DOCKER_ARG_TTY) \
$(CONTAINER_UTILS) $(DOCKER_EXEC_RASPBOOT) $(DOCKER_EXEC_RASPBOOT_DEV) \
$(OUTPUT)
jtagboot:
$(DOCKER_CMD) $(DOCKER_ARG_TTY) $(DOCKER_ARG_JTAG) $(CONTAINER_UTILS) \
$(DOCKER_EXEC_RASPBOOT) $(DOCKER_EXEC_RASPBOOT_DEV) \
/jtag/$(JTAG_BOOT_IMAGE)
openocd:
$(DOCKER_CMD) $(DOCKER_ARG_TTY) $(DOCKER_ARG_NET) $(CONTAINER_UTILS) \
openocd $(OPENOCD_ARG)
define gen_gdb
RUSTFLAGS="$(RUSTFLAGS_PEDANTIC)" $(XRUSTC_CMD) $1
cp $(CARGO_OUTPUT) kernel_for_jtag
$(DOCKER_CMD) $(DOCKER_ARG_CURDIR) $(DOCKER_ARG_NET) $(CONTAINER_UTILS) \
gdb-multiarch -q kernel_for_jtag
endef
gdb: clean $(SOURCES)
$(call gen_gdb,-C debuginfo=2)
gdb-opt0: clean $(SOURCES)
$(call gen_gdb,-C debuginfo=2 -C opt-level=0)
clippy:
cargo xclippy --target=$(TARGET) --features bsp_$(BSP)
clean:
rm -rf target
readelf:
readelf -a kernel
objdump:
cargo objdump --target $(TARGET) -- -disassemble -print-imm-hex kernel
nm:
cargo nm --target $(TARGET) -- kernel | sort

@ -0,0 +1,945 @@
# Tutorial 12 - Exceptions: Part 1
## tl;dr
We lay the groundwork for all the architectural `CPU exceptions`. For now, we only print an
elaborate system state through a `panic!` call and halt execution; this will help us find bugs
during development and at runtime.
For demo purposes, MMU `page faults` are used to demonstrate (i) returning from an exception and
(ii) the default `panic!` behavior.
## Introduction
Now that we are executing in `EL1` and have activated the `MMU`, it is time to implement `CPU
exceptions`. For now, we only set up a scaffold with very basic functionality that will help us
find bugs along the way. A follow-up `Interrupt` tutorial will continue the work we start here.
Please note that this tutorial is specific to the `AArch64` architecture. It does not contain any
generic exception handling code yet.
## Exception Types
`AArch64` differentiates between four types of exceptions:
- Synchronous
  - For example, a `data abort` (e.g. a `page fault`) or a `system call`. They happen as a direct
    consequence of executing a certain instruction, hence _synchronously_.
- Interrupt Request (`IRQ`)
  - For example, an external device, like a timer, asserting a physical interrupt line. IRQs
    happen _asynchronously_.
- Fast Interrupt Request (`FIQ`)
  - These are basically interrupts that take priority over normal IRQs and have some more traits
    that make them suitable for implementing super-fast processing. However, this is out of scope
    for this tutorial. For the sake of keeping these tutorials compact and concise, we will more or
    less ignore FIQs and only implement a dummy handler that would halt the CPU core.
- System Error (`SError`)
  - Like IRQs, SErrors happen asynchronously and are technically more or less the same. They are
    intended to signal rather fatal errors in the system, e.g. if a transaction times out on the
    `SoC` interconnect. They are very implementation specific, and it is up to the SoC vendor to
    decide which events are delivered as SErrors instead of normal IRQs.
## Exception entry
I recommend reading pages 1874-1876 of the [ARMv8 Architecture Reference Manual][ARMv8_Manual] to
understand the mechanisms of taking an exception.
Here's an excerpt of important features for this tutorial:
- Exception entry moves the processor to the same or a higher `Exception Level`, but never to a
lower `EL`.
- The program status is saved in the `SPSR_ELx` register at the target `EL`.
- The preferred return address is saved in the `ELR_ELx` register.
- "Preferred" here means that `ELR_ELx` may hold the instruction address of the instructions that
caused the exception (`synchronous case`) or the first instruction that did not complete due to
an `asynchronous` exception. Details in Chapter D1.10.1 of the [ARMv8 Architecture Reference
Manual][ARMv8_Manual].
- All kinds of exceptions are turned off upon taking an exception, so that by default, exception
handlers can not get interrupted themselves.
- Taking an exception will select the dedicated stack pointer of the target `EL`.
  - For example, if an exception in `EL0` is taken, the Stack Pointer Select register `SPSel` will
    switch from `0` to `1`, meaning that `SP_EL1` will be used by the exception vector code unless
    you explicitly change it back to `SP_EL0`.
### Exception Vectors
`AArch64` has a total of `16` exception vectors. There is one for each of the four kinds that were
introduced already, and additionally, the vectors distinguish _where_ the exception was taken from
and what the circumstances were.
Here is a copy of the decision table as shown in Chapter D1.10.2 of the [ARMv8 Architecture
Reference Manual][ARMv8_Manual]:
[ARMv8_Manual]: https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile
<table>
<thead>
<tr>
<th rowspan=2>Exception taken from </th>
<th colspan=4>Offset for exception type</th>
</tr>
<tr>
<th>Synchronous</th>
<th>IRQ or vIRQ</th>
<th>FIQ or vFIQ</th>
<th>SError or vSError</th>
</tr>
</thead>
<tbody>
<tr>
<td width="40%">Current Exception level with SP_EL0.</td>
<td align="center">0x000</td>
<td align="center">0x080</td>
<td align="center">0x100</td>
<td align="center">0x180</td>
</tr>
<tr>
<td>Current Exception level with SP_ELx, x>0.</td>
<td align="center">0x200</td>
<td align="center">0x280</td>
<td align="center">0x300</td>
<td align="center">0x380</td>
</tr>
<tr>
<td>Lower Exception level, where the implemented level immediately lower than the target level is using AArch64.</td>
<td align="center">0x400</td>
<td align="center">0x480</td>
<td align="center">0x500</td>
<td align="center">0x580</td>
</tr>
<tr>
<td>Lower Exception level, where the implemented level immediately lower than the target level is using AArch32.</td>
<td align="center">0x600</td>
<td align="center">0x680</td>
<td align="center">0x700</td>
<td align="center">0x780</td>
</tr>
</tbody>
</table>
Since our kernel runs in `EL1`, using `SP_EL1`, if we caused a synchronous exception, the exception
vector at offset `0x200` would be executed. But what does that even mean?
## Handler Code and Offsets
In many architectures, Operating Systems register their exception handlers (aka vectors) by
compiling an architecturally defined data structure that stores function pointers to the different
handlers. This can be as simple as an ordinary array of function pointers. The `base address` of
this data structure is then stored into a special purpose register so that the CPU can branch to the
respective handler function upon taking an exception. The classic `x86_64` architecture follows this
principle, for example.
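As a rough, hosted Rust sketch of that style (not part of this tutorial's code; all names below are
made up for illustration), such a table could look like this:
```rust
// Illustration only: the "array of function pointers" approach used by other architectures.
// AArch64 does it differently, as described next.
type Handler = fn(vector_nr: usize);

fn default_handler(vector_nr: usize) {
    println!("Unhandled exception vector {}", vector_nr);
}

// One function pointer per vector. The CPU (or a small dispatch stub) indexes this table and
// calls through the pointer; the table's base address would be stored in a special purpose
// register.
static VECTOR_TABLE: [Handler; 16] = [default_handler as Handler; 16];

fn main() {
    // Pretend vector number 2 was taken.
    VECTOR_TABLE[2](2);
}
```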
In `AArch64`, it is a bit different. Here, we have the special purpose register as well, called
`VBAR_EL1`: Vector Base Address Register.
However, it does not store the base address of an array of function pointers, but the base address
of a **memory location that contains code** for the 16 handlers, one handler back-to-back after the
other. Each handler can take a maximum space of `0x80` bytes, aka `128` bytes. That's why the table
above shows `offsets`: To indicate at which offset a certain handler starts.
Of course, you are not obliged to cram all your handler code into only 128 bytes. You are free to
branch off to any other functions at any time. Actually, that is needed in most cases anyway,
because the context-saving code alone would take up most of the available space (you'll learn what
context saving is shortly).
Additionally, there is a requirement that the `Vector Base Address` is aligned to `0x800` aka `2048`
bytes.
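To make the offset arithmetic concrete, here is a small hosted Rust sketch (not kernel code; the
`VBAR_EL1` value is hypothetical) that computes where the CPU starts executing for a given
combination of exception origin and type:
```rust
// Illustration only: the CPU starts executing at `VBAR_EL1 + offset`. The offset is the sum of
// a 0x200-sized block selected by where the exception was taken from, and a 0x80-sized slot
// selected by the exception type, exactly as in the table above.
fn vector_entry(vbar_el1: u64, origin_block: u64, kind_slot: u64) -> u64 {
    // The architecture demands that the vector base address is 2048-byte (0x800) aligned.
    assert_eq!(vbar_el1 % 0x800, 0);

    vbar_el1 + origin_block * 0x200 + kind_slot * 0x80
}

fn main() {
    // Our kernel runs in EL1 using SP_EL1: origin block 1 ("Current EL with SP_ELx").
    // A synchronous exception is kind slot 0. With a hypothetical VBAR_EL1 of 0x8_0000,
    // the handler code therefore starts at 0x8_0200.
    assert_eq!(vector_entry(0x8_0000, 1, 0), 0x8_0200);
}
```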
## Rust and Assembly Implementation
The implementation uses a mix of `Rust` and `Assembly` code.
### Context Save and Restore
Exception vectors, just like any other code, use a bunch of commonly shared processor resources.
Most of all, the set of `General Purpose Registers` (GPRs) that each core in `AArch64` provides
(`x0`-`x30`).
In order to not taint these registers when executing exception vector code, it is general practice
to save these shared resources in memory (the stack, to be precise) as the very first action. This
is commonly described as *saving the context*. Exception vector code can then freely use the shared
resources in its own code, and as a last action before returning from exception handling, it
restores the context so that the processor can continue where it left off before taking the
exception.
Context save and restore is one of the few places in system software where it is strongly advised
to use some hand-crafted assembly. Introducing `exception.S`:
```asm
/// Call the function provided by parameter `\handler` after saving the exception context,
/// providing the context as the first parameter.
.macro CALL_WITH_CONTEXT handler
// Make room on the stack for the exception context.
sub sp, sp, #16 * 17
// Store all general purpose registers on the stack.
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
// Add the exception link register (ELR_EL1) and the saved program status (SPSR_EL1).
mrs x1, ELR_EL1
mrs x2, SPSR_EL1
stp lr, x1, [sp, #16 * 15]
str w2, [sp, #16 * 16]
// x0 is the first argument for the function called through `\handler`.
mov x0, sp
// Call `\handler`.
bl \handler
// After returning from exception handling code, replay the saved context and return via `eret`.
b __exception_restore_context
.endm
```
First, a macro for saving the context is defined. It eventually jumps to follow-up handler code, and
finally restores the context. In advance, we reserve space on the stack for the context: `16 * 17`
bytes, that is, 17 slots of 16 bytes each. Fifteen pairs hold the 30 `GPRs` (`x0`-`x29`), one pair
holds the `link register` together with the `exception link register` (holding the preferred return
address), and the last slot holds the `saved program status`; a full 16-byte slot is reserved for it
because the stack pointer must stay 16-byte aligned. Afterwards, we store those registers, save the
current stack address in `x0` and branch off to the follow-up handler code, whose function name is
supplied as an argument to the macro (`\handler`).
The handler code will be written in Rust, but use the platform's `C` ABI. This way, we can define a
function signature that has a pointer to the context-data on the stack as its first argument, and
know that this argument is expected to be in the `x0` register. We need to use the `C` ABI here
because `Rust` has no stable convention ([yet](https://github.com/rust-lang/rfcs/issues/600)).
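The following hosted Rust sketch (not the kernel's code; the struct is a simplified stand-in)
illustrates the idea: an `extern "C"` function whose first argument is a pointer to a `#[repr(C)]`
struct, which is exactly the shape of contract the assembly relies on when it puts the stack
address into `x0`:
```rust
// Illustration only, runs on a hosted target. `Context` is a simplified stand-in for the real
// `ExceptionContext` shown below.
#[repr(C)]
struct Context {
    gpr: [u64; 2],
}

// With the C ABI, the first argument is passed according to the platform's calling convention,
// which on AArch64 means register x0 -- exactly where the assembly macro put the stack address.
extern "C" fn handler(ctx: &mut Context) {
    ctx.gpr[0] += 1;
}

fn main() {
    let mut ctx = Context { gpr: [0; 2] };

    // Stand-in for the assembly's `bl \handler` with x0 pointing at the saved context.
    let f: extern "C" fn(&mut Context) = handler;
    f(&mut ctx);

    assert_eq!(ctx.gpr[0], 1);
}
```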
### Exception Vector Table
Next, we craft the exception vector table:
```asm
.section .exception_vectors, "ax", @progbits
// Align by 2^11 bytes, as demanded by the AArch64 spec. Same as ALIGN(2048) in an ld script.
.align 11
// Export a symbol for the Rust code to use.
.global __exception_vector_start
__exception_vector_start:
// Current exception level with SP_EL0.
// .org sets the offset relative to section start.
//
// It must be ensured that `CALL_WITH_CONTEXT` <= 0x80 bytes.
.org 0x000
CALL_WITH_CONTEXT current_el0_synchronous
.org 0x080
CALL_WITH_CONTEXT current_el0_irq
.org 0x100
FIQ_SUSPEND
.org 0x180
CALL_WITH_CONTEXT current_el0_serror
// Current exception level with SP_ELx, x > 0.
.org 0x200
CALL_WITH_CONTEXT current_elx_synchronous
.org 0x280
CALL_WITH_CONTEXT current_elx_irq
.org 0x300
FIQ_SUSPEND
.org 0x380
CALL_WITH_CONTEXT current_elx_serror
[...]
```
Note how each vector starts at the required offset from the section start using the `.org`
directive. Each macro call introduces an explicit handler function name, which is implemented in
`Rust` in `exception.rs`.
### Implementing handlers
The file `exception.rs` first defines a `struct` of the exception context that is stored on the
stack by the assembly code:
```rust
/// The exception context as it is stored on the stack on exception entry.
#[repr(C)]
struct ExceptionContext {
    // General Purpose Registers.
    gpr: [u64; 30],
    // The link register, aka x30.
    lr: u64,
    // Exception link register. The program counter at the time the exception happened.
    elr_el1: u64,
    // Saved program status.
    spsr_el1: SpsrEL1,
}
```
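To illustrate how this `#[repr(C)]` struct is meant to line up with the stack layout produced by
`CALL_WITH_CONTEXT`, here is a hosted Rust sketch (not part of the kernel; `spsr_el1` is replaced
by a plain `u32` stand-in for the wrapped register copy) that checks the field offsets against the
`[sp, #16 * n]` slots:
```rust
// Illustration only. With #[repr(C)], fields are laid out in declaration order, so each field
// should sit at the same offset at which the assembly stored the corresponding register.
#[repr(C)]
struct ExceptionContext {
    gpr: [u64; 30], // x0..x29, stored as 15 pairs at [sp, #16 * 0] .. [sp, #16 * 14]
    lr: u64,        // x30, first half of the pair at [sp, #16 * 15]
    elr_el1: u64,   // second half of the pair at [sp, #16 * 15]
    spsr_el1: u32,  // the word stored at [sp, #16 * 16] (stand-in for the SpsrEL1 wrapper)
}

fn main() {
    let ctx = ExceptionContext { gpr: [0; 30], lr: 0, elr_el1: 0, spsr_el1: 0 };
    let base = &ctx as *const ExceptionContext as usize;

    assert_eq!(&ctx.lr as *const u64 as usize - base, 16 * 15);
    assert_eq!(&ctx.elr_el1 as *const u64 as usize - base, 16 * 15 + 8);
    assert_eq!(&ctx.spsr_el1 as *const u32 as usize - base, 16 * 16);
}
```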
The handlers take a `struct ExceptionContext` argument. Since we do not plan to implement handlers
for each exception yet, a default handler is provided:
```rust
/// Print verbose information about the exception and the panic.
fn default_exception_handler(e: &ExceptionContext) {
    panic!(
        "\n\nCPU Exception!\n\
         FAR_EL1: {:#018x}\n\
         {}\n\
         {}",
        FAR_EL1.get(),
        EsrEL1 {},
        e
    );
}
```
The actual handlers referenced from the assembly can now branch to it for the time being, e.g.:
```rust
#[no_mangle]
unsafe extern "C" fn current_el0_synchronous(e: &mut ExceptionContext) {
default_exception_handler(e);
}
```
## Causing an Exception - Testing the Code
We want to see two cases in action:
1. How taking, handling and returning from an exception works.
2. What the `panic!` print for unhandled exceptions looks like.
So after setting up exceptions in `main.rs` by calling
```rust
arch::enable_exception_handling();
```
we cause a data abort exception by reading from memory address `8 GiB`:
```rust
// Cause an exception by accessing a virtual address for which no translation was set up. This
// code accesses the address 8 GiB, which is outside the mapped address space.
//
// For demo purposes, the exception handler will catch the faulting 8 GiB address and allow
// execution to continue.
info!("");
info!("Trying to write to address 8 GiB...");
let mut big_addr: u64 = 8 * 1024 * 1024 * 1024;
unsafe { core::ptr::read_volatile(big_addr as *mut u64) };
```
This triggers our exception code, because we try to read from a virtual address for which no mapping
has been installed. Remember, we only installed identity-mapped page tables for the first `1 GiB`
(RPi3) or `4 GiB` (RPi4) of address space in the previous tutorial.
To survive this exception, the respective handler has a special demo case:
```rust
/// Synchronous exception taken from the current EL, using SP of the current EL.
#[no_mangle]
unsafe extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) {
    let far_el1 = FAR_EL1.extract().get();

    // This catches the demo case for this tutorial. If the fault address happens to be 8 GiB,
    // advance the exception link register for one instruction, so that execution can continue.
    if far_el1 == 8 * 1024 * 1024 * 1024 {
        e.elr_el1 += 4;

        asm::eret()
    }

    default_exception_handler(e);
}
```
It checks if the faulting address equals `8 GiB`, and if so, advances the copy of the `ELR` by 4
(`AArch64` instructions are always 4 bytes long), which makes it point to the next instruction
after the one that caused the exception. When
this handler returns, execution continues in the assembly macro we introduced before. The macro has
only one more line left: `b __exception_restore_context`, which jumps to an assembly function that
plays back our saved context before finally executing `eret` to return from the exception.
This will kick us back into `main.rs`. But we also want to see the `panic!` print.
Therefore, a second read is done, this time from address `9 GiB`. This is a case the handler will
not catch, eventually triggering the `panic!` call from the default handler.
## Test it
Emphasis on the events at timestamps > `6.xxxxxx`.
```console
make chainboot
[...]
### Listening on /dev/ttyUSB0
__ __ _ _ _ _
| \/ (_)_ _ (_) | ___ __ _ __| |
| |\/| | | ' \| | |__/ _ \/ _` / _` |
|_| |_|_|_||_|_|____\___/\__,_\__,_|
Raspberry Pi 3
[ML] Requesting binary
### sending kernel kernel8.img [65560 byte]
### finished sending
[ML] Loaded! Executing the payload now
[ 5.780890] Booting on: Raspberry Pi 3
[ 5.783142] MMU online. Special regions:
[ 5.787050] 0x00080000 - 0x0008ffff | 64 KiB | C RO PX | Kernel code and RO data
[ 5.795298] 0x1fff0000 - 0x1fffffff | 64 KiB | Dev RW PXN | Remapped Device MMIO
[ 5.803285] 0x3f000000 - 0x3fffffff | 16 MiB | Dev RW PXN | Device MMIO
[ 5.810492] Current privilege level: EL1
[ 5.814399] Exception handling state:
[ 5.818045] Debug: Masked
[ 5.821258] SError: Masked
[ 5.824470] IRQ: Masked
[ 5.827683] FIQ: Masked
[ 5.830895] Architectural timer resolution: 52 ns
[ 5.835584] Drivers loaded:
[ 5.838362] 1. GPIO
[ 5.840967] 2. PL011Uart
[ 5.844006] Timer test, spinning for 1 second
[ 6.848348]
[ 6.848352] Trying to write to address 8 GiB...
[ 6.852856] ************************************************
[ 6.858499] Whoa! We recovered from a synchronous exception!
[ 6.864142] ************************************************
[ 6.869786]
[ 6.871262] Let's try again
[ 6.874040] Trying to write to address 9 GiB...
Kernel panic:
CPU Exception!
FAR_EL1: 0x0000000240000000
ESR_EL1: 0x96000004
Exception Class (EC) : 0x25 - Data Abort, current EL
Instr Specific Syndrome (ISS): 0x4
ELR_EL1: 0x0000000000080e0c
SPSR_EL1: 0x600003c5
Flags:
Negative (N): Not set
Zero (Z): Set
Carry (C): Set
Overflow (V): Not set
Exception handling state:
Debug (D): Masked
SError (A): Masked
IRQ (I): Masked
FIQ (F): Masked
Illegal Execution State (IL): Not set
General purpose register:
x0 : 0x0000000000000000 x1 : 0x000000000008594e
x2 : 0x0000000000000026 x3 : 0x0000000000000000
x4 : 0x000000000007fc6d x5 : 0x0000000000000002
x6 : 0x0000000000000000 x7 : 0x679198042b2b0209
x8 : 0x0000000240000000 x9 : 0x000000000000000d
x10: 0x000000000000000a x11: 0x000000003f201000
x12: 0x0000000000000019 x13: 0x000000000000000a
x14: 0x000000000007fda8 x15: 0x0000000000000040
x16: 0x0000000000000000 x17: 0x0000000000000040
x18: 0x9a07782900000008 x19: 0x0000000000090008
x20: 0x000000003b9aca00 x21: 0x00000000000003e8
x22: 0x0000000000083064 x23: 0x00000000000831d8
x24: 0x00000000000f4240 x25: 0x00000000000852a8
x26: 0x0000000000085738 x27: 0x0000000000085818
x28: 0x00000000000831d8 x29: 0x0000000000085588
lr : 0x0000000000080e00
```
## Diff to previous
```diff
diff -uNr 11_virtual_memory/src/arch/aarch64/exception.rs 12_exceptions_part1/src/arch/aarch64/exception.rs
--- 11_virtual_memory/src/arch/aarch64/exception.rs
+++ 12_exceptions_part1/src/arch/aarch64/exception.rs
@@ -4,12 +4,248 @@
//! Exception handling.
-use cortex_a::regs::*;
+use core::fmt;
+use cortex_a::{asm, barrier, regs::*};
+use register::InMemoryRegister;
+
+// Assembly counterpart to this file.
+global_asm!(include_str!("exception.S"));
+
+/// Wrapper struct for memory copy of SPSR_EL1
+#[repr(transparent)]
+struct SpsrEL1(InMemoryRegister<u32, SPSR_EL1::Register>);
+
+/// The exception context as it is stored on the stack on exception entry.
+#[repr(C)]
+struct ExceptionContext {
+ // General Purpose Registers.
+ gpr: [u64; 30],
+ // The link register, aka x30.
+ lr: u64,
+ // Exception link register. The program counter at the time the exception happened.
+ elr_el1: u64,
+ // Saved program status.
+ spsr_el1: SpsrEL1,
+}
+
+/// Wrapper struct for pretty printing ESR_EL1
+struct EsrEL1;
+
+//--------------------------------------------------------------------------------------------------
+// Exception vector implementation
+//--------------------------------------------------------------------------------------------------
+
+/// Print verbose information about the exception and the panic.
+fn default_exception_handler(e: &ExceptionContext) {
+ panic!(
+ "\n\nCPU Exception!\n\
+ FAR_EL1: {:#018x}\n\
+ {}\n\
+ {}",
+ FAR_EL1.get(),
+ EsrEL1 {},
+ e
+ );
+}
+
+//--------------------------------------------------------------------------------------------------
+// Current, EL0
+//--------------------------------------------------------------------------------------------------
+
+#[no_mangle]
+unsafe extern "C" fn current_el0_synchronous(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn current_el0_irq(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn current_el0_serror(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+//--------------------------------------------------------------------------------------------------
+// Current, ELx
+//--------------------------------------------------------------------------------------------------
+
+/// Synchronous exception taken from the current EL, using SP of the current EL.
+#[no_mangle]
+unsafe extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) {
+ let far_el1 = FAR_EL1.extract().get();
+
+ // This catches the demo case for this tutorial. If the fault address happens to be 8 GiB,
+ // advance the exception link register for one instruction, so that execution can continue.
+ if far_el1 == 8 * 1024 * 1024 * 1024 {
+ e.elr_el1 += 4;
+
+ asm::eret()
+ }
+
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn current_elx_irq(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn current_elx_serror(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+//--------------------------------------------------------------------------------------------------
+// Lower, AArch64
+//--------------------------------------------------------------------------------------------------
+
+#[no_mangle]
+unsafe extern "C" fn lower_aarch64_synchronous(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn lower_aarch64_irq(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn lower_aarch64_serror(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+//--------------------------------------------------------------------------------------------------
+// Lower, AArch32
+//--------------------------------------------------------------------------------------------------
+
+#[no_mangle]
+unsafe extern "C" fn lower_aarch32_synchronous(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn lower_aarch32_irq(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+#[no_mangle]
+unsafe extern "C" fn lower_aarch32_serror(e: &mut ExceptionContext) {
+ default_exception_handler(e);
+}
+
+//--------------------------------------------------------------------------------------------------
+// Pretty printing
+//--------------------------------------------------------------------------------------------------
+
+/// Human readable ESR_EL1.
+#[rustfmt::skip]
+impl fmt::Display for EsrEL1 {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let esr_el1 = ESR_EL1.extract();
+
+ // Raw print of whole register.
+ writeln!(f, "ESR_EL1: {:#010x}", esr_el1.get())?;
+
+ // Raw print of exception class.
+ write!(f, " Exception Class (EC) : {:#x}", esr_el1.read(ESR_EL1::EC))?;
+
+ // Exception class, translation.
+ let ec_translation = match esr_el1.read_as_enum(ESR_EL1::EC) {
+ Some(ESR_EL1::EC::Value::DataAbortCurrentEL) => "Data Abort, current EL",
+ _ => "N/A",
+ };
+ writeln!(f, " - {}", ec_translation)?;
+
+ // Raw print of instruction specific syndrome.
+ write!(f, " Instr Specific Syndrome (ISS): {:#x}", esr_el1.read(ESR_EL1::ISS))?;
+
+ Ok(())
+ }
+}
+
+/// Human readable SPSR_EL1.
+#[rustfmt::skip]
+impl fmt::Display for SpsrEL1 {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ // Raw value.
+ writeln!(f, "SPSR_EL1: {:#010x}", self.0.get())?;
+
+ let to_flag_str = |x| -> _ {
+ if x { "Set" } else { "Not set" }
+ };
+
+ writeln!(f, " Flags:")?;
+ writeln!(f, " Negative (N): {}", to_flag_str(self.0.is_set(SPSR_EL1::N)))?;
+ writeln!(f, " Zero (Z): {}", to_flag_str(self.0.is_set(SPSR_EL1::Z)))?;
+ writeln!(f, " Carry (C): {}", to_flag_str(self.0.is_set(SPSR_EL1::C)))?;
+ writeln!(f, " Overflow (V): {}", to_flag_str(self.0.is_set(SPSR_EL1::V)))?;
+
+ let to_mask_str = |x| -> _ {
+ if x { "Masked" } else { "Unmasked" }
+ };
+
+ writeln!(f, " Exception handling state:")?;
+ writeln!(f, " Debug (D): {}", to_mask_str(self.0.is_set(SPSR_EL1::D)))?;
+ writeln!(f, " SError (A): {}", to_mask_str(self.0.is_set(SPSR_EL1::A)))?;
+ writeln!(f, " IRQ (I): {}", to_mask_str(self.0.is_set(SPSR_EL1::I)))?;
+ writeln!(f, " FIQ (F): {}", to_mask_str(self.0.is_set(SPSR_EL1::F)))?;
+
+ write!(f, " Illegal Execution State (IL): {}",
+ to_flag_str(self.0.is_set(SPSR_EL1::IL))
+ )?;
+
+ Ok(())
+ }
+}
+
+/// Human readable print of the exception context.
+impl fmt::Display for ExceptionContext {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ writeln!(f, "ELR_EL1: {:#018x}", self.elr_el1)?;
+ writeln!(f, "{}", self.spsr_el1)?;
+ writeln!(f)?;
+ writeln!(f, "General purpose register:")?;
+
+ #[rustfmt::skip]
+ let alternating = |x| -> _ {
+ if x % 2 == 0 { " " } else { "\n" }
+ };
+
+ // Print two registers per line.
+ for (i, reg) in self.gpr.iter().enumerate() {
+ write!(f, " x{: <2}: {: >#018x}{}", i, reg, alternating(i))?;
+ }
+ write!(f, " lr : {:#018x}", self.lr)?;
+
+ Ok(())
+ }
+}
//--------------------------------------------------------------------------------------------------
// Arch-public
//--------------------------------------------------------------------------------------------------
+/// Set the exception vector base address register.
+///
+/// # Safety
+///
+/// - The vector table and the symbol `__exception_vector_start` from `exception.S` must
+/// adhere to the alignment and size constraints demanded by the AArch64 spec.
+pub unsafe fn set_vbar_el1() {
+ // Provided by exception.S.
+ extern "C" {
+ static mut __exception_vector_start: u64;
+ }
+ let addr: u64 = &__exception_vector_start as *const _ as u64;
+
+ VBAR_EL1.set(addr);
+
+ // Force VBAR update to complete before next instruction.
+ barrier::isb(barrier::SY);
+}
+
pub trait DaifField {
fn daif_field() -> register::Field<u32, DAIF::Register>;
}
diff -uNr 11_virtual_memory/src/arch/aarch64/exception.S 12_exceptions_part1/src/arch/aarch64/exception.S
--- 11_virtual_memory/src/arch/aarch64/exception.S
+++ 12_exceptions_part1/src/arch/aarch64/exception.S
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
+
+/// Call the function provided by parameter `\handler` after saving the exception context,
+/// providing the context as the first parameter.
+.macro CALL_WITH_CONTEXT handler
+ // Make room on the stack for the exception context.
+ sub sp, sp, #16 * 17
+
+ // Store all general purpose registers on the stack.
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+ stp x12, x13, [sp, #16 * 6]
+ stp x14, x15, [sp, #16 * 7]
+ stp x16, x17, [sp, #16 * 8]
+ stp x18, x19, [sp, #16 * 9]
+ stp x20, x21, [sp, #16 * 10]
+ stp x22, x23, [sp, #16 * 11]
+ stp x24, x25, [sp, #16 * 12]
+ stp x26, x27, [sp, #16 * 13]
+ stp x28, x29, [sp, #16 * 14]
+
+ // Add the exception link register (ELR_EL1) and the saved program status (SPSR_EL1).
+ mrs x1, ELR_EL1
+ mrs x2, SPSR_EL1
+
+ stp lr, x1, [sp, #16 * 15]
+ str w2, [sp, #16 * 16]
+
+ // x0 is the first argument for the function called through `\handler`.
+ mov x0, sp
+
+ // Call `\handler`.
+ bl \handler
+
+ // After returning from exception handling code, replay the saved context and return via `eret`.
+ b __exception_restore_context
+.endm
+
+.macro FIQ_SUSPEND
+1: wfe
+ b 1b
+.endm
+
+//--------------------------------------------------------------------------------------------------
+// The exception vector table.
+//--------------------------------------------------------------------------------------------------
+.section .exception_vectors, "ax", @progbits
+
+// Align by 2^11 bytes, as demanded by the AArch64 spec. Same as ALIGN(2048) in an ld script.
+.align 11
+
+// Export a symbol for the Rust code to use.
+.global __exception_vector_start
+__exception_vector_start:
+
+// Current exception level with SP_EL0.
+// .org sets the offset relative to section start.
+//
+// It must be ensured that `CALL_WITH_CONTEXT` <= 0x80 bytes.
+.org 0x000
+ CALL_WITH_CONTEXT current_el0_synchronous
+.org 0x080
+ CALL_WITH_CONTEXT current_el0_irq
+.org 0x100
+ FIQ_SUSPEND
+.org 0x180
+ CALL_WITH_CONTEXT current_el0_serror
+
+// Current exception level with SP_ELx, x > 0.
+.org 0x200
+ CALL_WITH_CONTEXT current_elx_synchronous
+.org 0x280
+ CALL_WITH_CONTEXT current_elx_irq
+.org 0x300
+ FIQ_SUSPEND
+.org 0x380
+ CALL_WITH_CONTEXT current_elx_serror
+
+// Lower exception level, aarch64
+.org 0x400
+ CALL_WITH_CONTEXT lower_aarch64_synchronous
+.org 0x480
+ CALL_WITH_CONTEXT lower_aarch64_irq
+.org 0x500
+ FIQ_SUSPEND
+.org 0x580
+ CALL_WITH_CONTEXT lower_aarch64_serror
+
+// Lower exception level, aarch32
+.org 0x600
+ CALL_WITH_CONTEXT lower_aarch32_synchronous
+.org 0x680
+ CALL_WITH_CONTEXT lower_aarch32_irq
+.org 0x700
+ FIQ_SUSPEND
+.org 0x780
+ CALL_WITH_CONTEXT lower_aarch32_serror
+.org 0x800
+
+//--------------------------------------------------------------------------------------------------
+// Helper functions
+//--------------------------------------------------------------------------------------------------
+.global __exception_restore_context
+__exception_restore_context:
+ ldr w19, [sp, #16 * 16]
+ ldp lr, x20, [sp, #16 * 15]
+
+ msr SPSR_EL1, x19
+ msr ELR_EL1, x20
+
+ ldp x0, x1, [sp, #16 * 0]
+ ldp x2, x3, [sp, #16 * 1]
+ ldp x4, x5, [sp, #16 * 2]
+ ldp x6, x7, [sp, #16 * 3]
+ ldp x8, x9, [sp, #16 * 4]
+ ldp x10, x11, [sp, #16 * 5]
+ ldp x12, x13, [sp, #16 * 6]
+ ldp x14, x15, [sp, #16 * 7]
+ ldp x16, x17, [sp, #16 * 8]
+ ldp x18, x19, [sp, #16 * 9]
+ ldp x20, x21, [sp, #16 * 10]
+ ldp x22, x23, [sp, #16 * 11]
+ ldp x24, x25, [sp, #16 * 12]
+ ldp x26, x27, [sp, #16 * 13]
+ ldp x28, x29, [sp, #16 * 14]
+
+ add sp, sp, #16 * 17
+
+ eret
diff -uNr 11_virtual_memory/src/arch/aarch64.rs 12_exceptions_part1/src/arch/aarch64.rs
--- 11_virtual_memory/src/arch/aarch64.rs
+++ 12_exceptions_part1/src/arch/aarch64.rs
@@ -106,6 +106,15 @@
}
}
+/// Enable exception handling.
+///
+/// # Safety
+///
+/// - Changes the HW state of the processing element.
+pub unsafe fn enable_exception_handling() {
+ exception::set_vbar_el1();
+}
+
/// Return a reference to an `interface::mm::MMU` implementation.
pub fn mmu() -> &'static impl interface::mm::MMU {
&MMU
diff -uNr 11_virtual_memory/src/bsp.rs 12_exceptions_part1/src/bsp.rs
--- 11_virtual_memory/src/bsp.rs
+++ 12_exceptions_part1/src/bsp.rs
@@ -4,7 +4,7 @@
//! Conditional exporting of Board Support Packages.
-pub mod driver;
+mod driver;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod rpi;
diff -uNr 11_virtual_memory/src/main.rs 12_exceptions_part1/src/main.rs
--- 11_virtual_memory/src/main.rs
+++ 12_exceptions_part1/src/main.rs
@@ -22,6 +22,7 @@
#![allow(incomplete_features)]
#![feature(const_generics)]
#![feature(format_args_nl)]
+#![feature(global_asm)]
#![feature(panic_info_message)]
#![feature(trait_alias)]
#![no_main]
@@ -57,6 +58,8 @@
unsafe fn kernel_init() -> ! {
use interface::mm::MMU;
+ arch::enable_exception_handling();
+
if let Err(string) = arch::mmu().init() {
panic!("MMU: {}", string);
}
@@ -103,13 +106,28 @@
info!("Timer test, spinning for 1 second");
arch::timer().spin_for(Duration::from_secs(1));
- let remapped_uart = unsafe { bsp::driver::PL011Uart::new(0x1FFF_1000) };
- writeln!(
- remapped_uart,
- "[ !!! ] Writing through the remapped UART at 0x1FFF_1000"
- )
- .unwrap();
+ // Cause an exception by accessing a virtual address for which no translation was set up. This
+ // code accesses the address 8 GiB, which is outside the mapped address space.
+ //
+ // For demo purposes, the exception handler will catch the faulting 8 GiB address and allow
+ // execution to continue.
+ info!("");
+ info!("Trying to write to address 8 GiB...");
+ let mut big_addr: u64 = 8 * 1024 * 1024 * 1024;
+ unsafe { core::ptr::read_volatile(big_addr as *mut u64) };
+
+ info!("************************************************");
+ info!("Whoa! We recovered from a synchronous exception!");
+ info!("************************************************");
+ info!("");
+ info!("Let's try again");
+
+ // Now use address 9 GiB. The exception handler won't forgive us this time.
+ info!("Trying to write to address 9 GiB...");
+ big_addr = 9 * 1024 * 1024 * 1024;
+ unsafe { core::ptr::read_volatile(big_addr as *mut u64) };
+ // Will never reach here in this tutorial.
info!("Echoing input now");
loop {
let c = bsp::console().read_char();
```

Binary file not shown.

Binary file not shown.

@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Conditional exporting of processor architecture code.
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod aarch64;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use aarch64::*;

@ -0,0 +1,154 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! AArch64.
mod exception;
mod mmu;
pub mod sync;
mod time;
use crate::{bsp, interface};
use cortex_a::{asm, regs::*};
/// The entry of the `kernel` binary.
///
/// The function must be named `_start`, because the linker is looking for this exact name.
///
/// # Safety
///
/// - The linker script must place this function at `0x80_000`.
#[no_mangle]
pub unsafe extern "C" fn _start() -> ! {
const CORE_MASK: u64 = 0x3;
// Expect the boot core to start in EL2.
if (bsp::BOOT_CORE_ID == MPIDR_EL1.get() & CORE_MASK)
&& (CurrentEL.get() == CurrentEL::EL::EL2.value)
{
el2_to_el1_transition()
} else {
// If not core0, infinitely wait for events.
wait_forever()
}
}
/// Transition from EL2 to EL1.
///
/// # Safety
///
/// - The HW state of EL1 must be prepared in a sound way.
/// - Exception return from EL2 must continue execution in EL1 with `runtime_init::init()`.
#[inline(always)]
unsafe fn el2_to_el1_transition() -> ! {
// Enable timer counter registers for EL1.
CNTHCTL_EL2.write(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);
// No offset for reading the counters.
CNTVOFF_EL2.set(0);
// Set EL1 execution state to AArch64.
HCR_EL2.write(HCR_EL2::RW::EL1IsAarch64);
// Set up a simulated exception return.
//
// First, fake a saved program status, where all interrupts were masked and SP_EL1 was used as a
// stack pointer.
SPSR_EL2.write(
SPSR_EL2::D::Masked
+ SPSR_EL2::A::Masked
+ SPSR_EL2::I::Masked
+ SPSR_EL2::F::Masked
+ SPSR_EL2::M::EL1h,
);
// Second, let the link register point to init().
ELR_EL2.set(crate::runtime_init::init as *const () as u64);
// Set up SP_EL1 (stack pointer), which will be used by EL1 once we "return" to it.
SP_EL1.set(bsp::BOOT_CORE_STACK_START);
// Use `eret` to "return" to EL1. This will result in execution of `runtime_init::init()` in EL1.
asm::eret()
}
//--------------------------------------------------------------------------------------------------
// Global instances
//--------------------------------------------------------------------------------------------------
static TIMER: time::Timer = time::Timer;
static MMU: mmu::MMU = mmu::MMU;
//--------------------------------------------------------------------------------------------------
// Implementation of the kernel's architecture abstraction code
//--------------------------------------------------------------------------------------------------
pub use asm::nop;
/// Spin for `n` cycles.
pub fn spin_for_cycles(n: usize) {
for _ in 0..n {
asm::nop();
}
}
/// Return a reference to an `interface::time::Timer` implementation.
pub fn timer() -> &'static impl interface::time::Timer {
&TIMER
}
/// Pause execution on the calling CPU core.
#[inline(always)]
pub fn wait_forever() -> ! {
loop {
asm::wfe()
}
}
/// Enable exception handling.
///
/// # Safety
///
/// - Changes the HW state of the processing element.
pub unsafe fn enable_exception_handling() {
exception::set_vbar_el1();
}
/// Return a reference to an `interface::mm::MMU` implementation.
pub fn mmu() -> &'static impl interface::mm::MMU {
&MMU
}
/// Information about the HW state.
pub mod state {
use cortex_a::regs::*;
/// The processing element's current privilege level.
pub fn current_privilege_level() -> &'static str {
let el = CurrentEL.read_as_enum(CurrentEL::EL);
match el {
Some(CurrentEL::EL::Value::EL2) => "EL2",
Some(CurrentEL::EL::Value::EL1) => "EL1",
_ => "Unknown",
}
}
#[rustfmt::skip]
pub fn print_exception_state() {
use super::{
exception,
exception::{Debug, SError, FIQ, IRQ},
};
use crate::info;
let to_mask_str = |x| -> _ {
if x { "Masked" } else { "Unmasked" }
};
info!(" Debug: {}", to_mask_str(exception::is_masked::<Debug>()));
info!(" SError: {}", to_mask_str(exception::is_masked::<SError>()));
info!(" IRQ: {}", to_mask_str(exception::is_masked::<IRQ>()));
info!(" FIQ: {}", to_mask_str(exception::is_masked::<FIQ>()));
}
}

@ -0,0 +1,135 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
/// Call the function provided by parameter `\handler` after saving the exception context,
/// providing the context as the first parameter.
.macro CALL_WITH_CONTEXT handler
// Make room on the stack for the exception context.
sub sp, sp, #16 * 17
// Store all general purpose registers on the stack.
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
// Add the exception link register (ELR_EL1) and the saved program status (SPSR_EL1).
mrs x1, ELR_EL1
mrs x2, SPSR_EL1
stp lr, x1, [sp, #16 * 15]
str w2, [sp, #16 * 16]
// x0 is the first argument for the function called through `\handler`.
mov x0, sp
// Call `\handler`.
bl \handler
// After returning from exception handling code, replay the saved context and return via `eret`.
b __exception_restore_context
.endm
.macro FIQ_SUSPEND
1: wfe
b 1b
.endm
//--------------------------------------------------------------------------------------------------
// The exception vector table.
//--------------------------------------------------------------------------------------------------
.section .exception_vectors, "ax", @progbits
// Align by 2^11 bytes, as demanded by the AArch64 spec. Same as ALIGN(2048) in an ld script.
.align 11
// Export a symbol for the Rust code to use.
.global __exception_vector_start
__exception_vector_start:
// Current exception level with SP_EL0.
// .org sets the offset relative to section start.
//
// It must be ensured that `CALL_WITH_CONTEXT` <= 0x80 bytes.
.org 0x000
CALL_WITH_CONTEXT current_el0_synchronous
.org 0x080
CALL_WITH_CONTEXT current_el0_irq
.org 0x100
FIQ_SUSPEND
.org 0x180
CALL_WITH_CONTEXT current_el0_serror
// Current exception level with SP_ELx, x > 0.
.org 0x200
CALL_WITH_CONTEXT current_elx_synchronous
.org 0x280
CALL_WITH_CONTEXT current_elx_irq
.org 0x300
FIQ_SUSPEND
.org 0x380
CALL_WITH_CONTEXT current_elx_serror
// Lower exception level, aarch64
.org 0x400
CALL_WITH_CONTEXT lower_aarch64_synchronous
.org 0x480
CALL_WITH_CONTEXT lower_aarch64_irq
.org 0x500
FIQ_SUSPEND
.org 0x580
CALL_WITH_CONTEXT lower_aarch64_serror
// Lower exception level, aarch32
.org 0x600
CALL_WITH_CONTEXT lower_aarch32_synchronous
.org 0x680
CALL_WITH_CONTEXT lower_aarch32_irq
.org 0x700
FIQ_SUSPEND
.org 0x780
CALL_WITH_CONTEXT lower_aarch32_serror
.org 0x800
//--------------------------------------------------------------------------------------------------
// Helper functions
//--------------------------------------------------------------------------------------------------
.global __exception_restore_context
__exception_restore_context:
ldr w19, [sp, #16 * 16]
ldp lr, x20, [sp, #16 * 15]
msr SPSR_EL1, x19
msr ELR_EL1, x20
ldp x0, x1, [sp, #16 * 0]
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
ldp x8, x9, [sp, #16 * 4]
ldp x10, x11, [sp, #16 * 5]
ldp x12, x13, [sp, #16 * 6]
ldp x14, x15, [sp, #16 * 7]
ldp x16, x17, [sp, #16 * 8]
ldp x18, x19, [sp, #16 * 9]
ldp x20, x21, [sp, #16 * 10]
ldp x22, x23, [sp, #16 * 11]
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
add sp, sp, #16 * 17
eret

@ -0,0 +1,284 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Exception handling.
use core::fmt;
use cortex_a::{asm, barrier, regs::*};
use register::InMemoryRegister;
// Assembly counterpart to this file.
global_asm!(include_str!("exception.S"));
/// Wrapper struct for memory copy of SPSR_EL1
#[repr(transparent)]
struct SpsrEL1(InMemoryRegister<u32, SPSR_EL1::Register>);
/// The exception context as it is stored on the stack on exception entry.
#[repr(C)]
struct ExceptionContext {
// General Purpose Registers.
gpr: [u64; 30],
// The link register, aka x30.
lr: u64,
// Exception link register. The program counter at the time the exception happened.
elr_el1: u64,
// Saved program status.
spsr_el1: SpsrEL1,
}
/// Wrapper struct for pretty printing ESR_EL1
struct EsrEL1;
//--------------------------------------------------------------------------------------------------
// Exception vector implementation
//--------------------------------------------------------------------------------------------------
/// Print verbose information about the exception and the panic.
fn default_exception_handler(e: &ExceptionContext) {
panic!(
"\n\nCPU Exception!\n\
FAR_EL1: {:#018x}\n\
{}\n\
{}",
FAR_EL1.get(),
EsrEL1 {},
e
);
}
//--------------------------------------------------------------------------------------------------
// Current, EL0
//--------------------------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn current_el0_synchronous(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn current_el0_irq(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn current_el0_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//--------------------------------------------------------------------------------------------------
// Current, ELx
//--------------------------------------------------------------------------------------------------
/// Synchronous exception taken from the current EL, using SP of the current EL.
#[no_mangle]
unsafe extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) {
let far_el1 = FAR_EL1.extract().get();
// This catches the demo case for this tutorial. If the fault address happens to be 8 GiB,
// advance the exception link register for one instruction, so that execution can continue.
if far_el1 == 8 * 1024 * 1024 * 1024 {
e.elr_el1 += 4;
asm::eret()
}
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn current_elx_irq(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn current_elx_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//--------------------------------------------------------------------------------------------------
// Lower, AArch64
//--------------------------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn lower_aarch64_synchronous(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch64_irq(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch64_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//--------------------------------------------------------------------------------------------------
// Lower, AArch32
//--------------------------------------------------------------------------------------------------
#[no_mangle]
unsafe extern "C" fn lower_aarch32_synchronous(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch32_irq(e: &mut ExceptionContext) {
default_exception_handler(e);
}
#[no_mangle]
unsafe extern "C" fn lower_aarch32_serror(e: &mut ExceptionContext) {
default_exception_handler(e);
}
//--------------------------------------------------------------------------------------------------
// Pretty printing
//--------------------------------------------------------------------------------------------------
/// Human readable ESR_EL1.
#[rustfmt::skip]
impl fmt::Display for EsrEL1 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let esr_el1 = ESR_EL1.extract();
// Raw print of whole register.
writeln!(f, "ESR_EL1: {:#010x}", esr_el1.get())?;
// Raw print of exception class.
write!(f, " Exception Class (EC) : {:#x}", esr_el1.read(ESR_EL1::EC))?;
// Exception class, translation.
let ec_translation = match esr_el1.read_as_enum(ESR_EL1::EC) {
Some(ESR_EL1::EC::Value::DataAbortCurrentEL) => "Data Abort, current EL",
_ => "N/A",
};
writeln!(f, " - {}", ec_translation)?;
// Raw print of instruction specific syndrome.
write!(f, " Instr Specific Syndrome (ISS): {:#x}", esr_el1.read(ESR_EL1::ISS))?;
Ok(())
}
}
/// Human readable SPSR_EL1.
#[rustfmt::skip]
impl fmt::Display for SpsrEL1 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Raw value.
writeln!(f, "SPSR_EL1: {:#010x}", self.0.get())?;
let to_flag_str = |x| -> _ {
if x { "Set" } else { "Not set" }
};
writeln!(f, " Flags:")?;
writeln!(f, " Negative (N): {}", to_flag_str(self.0.is_set(SPSR_EL1::N)))?;
writeln!(f, " Zero (Z): {}", to_flag_str(self.0.is_set(SPSR_EL1::Z)))?;
writeln!(f, " Carry (C): {}", to_flag_str(self.0.is_set(SPSR_EL1::C)))?;
writeln!(f, " Overflow (V): {}", to_flag_str(self.0.is_set(SPSR_EL1::V)))?;
let to_mask_str = |x| -> _ {
if x { "Masked" } else { "Unmasked" }
};
writeln!(f, " Exception handling state:")?;
writeln!(f, " Debug (D): {}", to_mask_str(self.0.is_set(SPSR_EL1::D)))?;
writeln!(f, " SError (A): {}", to_mask_str(self.0.is_set(SPSR_EL1::A)))?;
writeln!(f, " IRQ (I): {}", to_mask_str(self.0.is_set(SPSR_EL1::I)))?;
writeln!(f, " FIQ (F): {}", to_mask_str(self.0.is_set(SPSR_EL1::F)))?;
write!(f, " Illegal Execution State (IL): {}",
to_flag_str(self.0.is_set(SPSR_EL1::IL))
)?;
Ok(())
}
}
/// Human readable print of the exception context.
impl fmt::Display for ExceptionContext {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "ELR_EL1: {:#018x}", self.elr_el1)?;
writeln!(f, "{}", self.spsr_el1)?;
writeln!(f)?;
writeln!(f, "General purpose register:")?;
#[rustfmt::skip]
let alternating = |x| -> _ {
if x % 2 == 0 { " " } else { "\n" }
};
// Print two registers per line.
for (i, reg) in self.gpr.iter().enumerate() {
write!(f, " x{: <2}: {: >#018x}{}", i, reg, alternating(i))?;
}
write!(f, " lr : {:#018x}", self.lr)?;
Ok(())
}
}
//--------------------------------------------------------------------------------------------------
// Arch-public
//--------------------------------------------------------------------------------------------------
/// Set the exception vector base address register.
///
/// # Safety
///
/// - The vector table and the symbol `__exception_vector_start` from `exception.S` must
/// adhere to the alignment and size constraints demanded by the AArch64 spec.
pub unsafe fn set_vbar_el1() {
// Provided by exception.S.
extern "C" {
static mut __exception_vector_start: u64;
}
let addr: u64 = &__exception_vector_start as *const _ as u64;
VBAR_EL1.set(addr);
// Force VBAR update to complete before next instruction.
barrier::isb(barrier::SY);
}
pub trait DaifField {
fn daif_field() -> register::Field<u32, DAIF::Register>;
}
pub struct Debug;
pub struct SError;
pub struct IRQ;
pub struct FIQ;
impl DaifField for Debug {
fn daif_field() -> register::Field<u32, DAIF::Register> {
DAIF::D
}
}
impl DaifField for SError {
fn daif_field() -> register::Field<u32, DAIF::Register> {
DAIF::A
}
}
impl DaifField for IRQ {
fn daif_field() -> register::Field<u32, DAIF::Register> {
DAIF::I
}
}
impl DaifField for FIQ {
fn daif_field() -> register::Field<u32, DAIF::Register> {
DAIF::F
}
}
pub fn is_masked<T: DaifField>() -> bool {
DAIF.is_set(T::daif_field())
}

@ -0,0 +1,316 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management Unit.
//!
//! Static page tables, compiled on boot; Everything 64 KiB granule.
use crate::{
bsp, interface,
memory::{AccessPermissions, AttributeFields, MemAttributes},
};
use core::convert;
use cortex_a::{barrier, regs::*};
use register::register_bitfields;
// A table descriptor, as per AArch64 Reference Manual Figure D4-15.
register_bitfields! {u64,
STAGE1_TABLE_DESCRIPTOR [
/// Physical address of the next page table.
NEXT_LEVEL_TABLE_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
],
VALID OFFSET(0) NUMBITS(1) [
False = 0,
True = 1
]
]
}
// A level 3 page descriptor, as per AArch64 Reference Manual Figure D4-17.
register_bitfields! {u64,
STAGE1_PAGE_DESCRIPTOR [
/// Privileged execute-never
PXN OFFSET(53) NUMBITS(1) [
False = 0,
True = 1
],
/// Physical address of the next page table (lvl2) or the page descriptor (lvl3).
OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16]
/// Access flag
AF OFFSET(10) NUMBITS(1) [
False = 0,
True = 1
],
/// Shareability field
SH OFFSET(8) NUMBITS(2) [
OuterShareable = 0b10,
InnerShareable = 0b11
],
/// Access Permissions
AP OFFSET(6) NUMBITS(2) [
RW_EL1 = 0b00,
RW_EL1_EL0 = 0b01,
RO_EL1 = 0b10,
RO_EL1_EL0 = 0b11
],
/// Memory attributes index into the MAIR_EL1 register
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
],
VALID OFFSET(0) NUMBITS(1) [
False = 0,
True = 1
]
]
}
// Two newtypes for added type safety, so that you cannot accidentally place a TableDescriptor into
// a PageDescriptor slot in `struct PageTables`, and vice versa.
#[derive(Copy, Clone)]
#[repr(transparent)]
struct RawTableDescriptor(u64);
#[derive(Copy, Clone)]
#[repr(transparent)]
struct RawPageDescriptor(u64);
const SIXTYFOUR_KIB_SHIFT: usize = 16; // log2(64 * 1024)
const FIVETWELVE_MIB_SHIFT: usize = 29; // log2(512 * 1024 * 1024)
/// Big monolithic struct for storing the page tables. Individual levels must be 64 KiB aligned,
/// hence the "reverse" order of appearance.
#[repr(C)]
#[repr(align(65536))]
struct PageTables<const N: usize> {
// Page descriptors, covering 64 KiB windows per entry.
lvl3: [[RawPageDescriptor; 8192]; N],
// Table descriptors, covering 512 MiB windows.
lvl2: [RawTableDescriptor; N],
}
/// Usually evaluates to 1 GiB for RPi3 and 4 GiB for RPi 4.
const ENTRIES_512_MIB: usize = bsp::addr_space_size() >> FIVETWELVE_MIB_SHIFT;
/// The page tables.
///
/// Supposed to land in `.bss`. Therefore, ensure that they boil down to all "0" entries.
static mut TABLES: PageTables<{ ENTRIES_512_MIB }> = PageTables {
lvl3: [[RawPageDescriptor(0); 8192]; ENTRIES_512_MIB],
lvl2: [RawTableDescriptor(0); ENTRIES_512_MIB],
};
trait BaseAddr {
fn base_addr_u64(&self) -> u64;
fn base_addr_usize(&self) -> usize;
}
impl<T, const N: usize> BaseAddr for [T; N] {
fn base_addr_u64(&self) -> u64 {
self as *const T as u64
}
fn base_addr_usize(&self) -> usize {
self as *const T as usize
}
}
/// A descriptor pointing to the next page table.
struct TableDescriptor(register::FieldValue<u64, STAGE1_TABLE_DESCRIPTOR::Register>);
impl TableDescriptor {
fn new(next_lvl_table_addr: usize) -> TableDescriptor {
let shifted = next_lvl_table_addr >> SIXTYFOUR_KIB_SHIFT;
TableDescriptor(
STAGE1_TABLE_DESCRIPTOR::VALID::True
+ STAGE1_TABLE_DESCRIPTOR::TYPE::Table
+ STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64),
)
}
}
impl convert::From<TableDescriptor> for RawTableDescriptor {
fn from(desc: TableDescriptor) -> Self {
RawTableDescriptor(desc.0.value)
}
}
/// Convert the kernel's generic memory range attributes to HW-specific attributes of the MMU.
impl convert::From<AttributeFields>
for register::FieldValue<u64, STAGE1_PAGE_DESCRIPTOR::Register>
{
fn from(attribute_fields: AttributeFields) -> Self {
// Memory attributes
let mut desc = match attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => {
STAGE1_PAGE_DESCRIPTOR::SH::InnerShareable
+ STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::NORMAL)
}
MemAttributes::Device => {
STAGE1_PAGE_DESCRIPTOR::SH::OuterShareable
+ STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::DEVICE)
}
};
// Access Permissions
desc += match attribute_fields.acc_perms {
AccessPermissions::ReadOnly => STAGE1_PAGE_DESCRIPTOR::AP::RO_EL1,
AccessPermissions::ReadWrite => STAGE1_PAGE_DESCRIPTOR::AP::RW_EL1,
};
// Execute Never
desc += if attribute_fields.execute_never {
STAGE1_PAGE_DESCRIPTOR::PXN::True
} else {
STAGE1_PAGE_DESCRIPTOR::PXN::False
};
desc
}
}
/// A page descriptor with 64 KiB aperture.
///
/// The output points to physical memory.
struct PageDescriptor(register::FieldValue<u64, STAGE1_PAGE_DESCRIPTOR::Register>);
impl PageDescriptor {
fn new(output_addr: usize, attribute_fields: AttributeFields) -> PageDescriptor {
let shifted = output_addr >> SIXTYFOUR_KIB_SHIFT;
PageDescriptor(
STAGE1_PAGE_DESCRIPTOR::VALID::True
+ STAGE1_PAGE_DESCRIPTOR::AF::True
+ attribute_fields.into()
+ STAGE1_PAGE_DESCRIPTOR::TYPE::Table
+ STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted as u64),
)
}
}
impl convert::From<PageDescriptor> for RawPageDescriptor {
fn from(desc: PageDescriptor) -> Self {
RawPageDescriptor(desc.0.value)
}
}
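// Illustration only: a standalone, hosted sketch (not part of this kernel file) showing
// how a 64 KiB-aligned output address round-trips through the OUTPUT_ADDR_64KiB field
// (bits [47:16]) that PageDescriptor::new() above fills in. The concrete address is an
// arbitrary example, chosen only because it is 64 KiB aligned.
fn main() {
    const SIXTYFOUR_KIB_SHIFT: u64 = 16;

    let output_addr: u64 = 0x3F20_0000;
    let field_value = output_addr >> SIXTYFOUR_KIB_SHIFT; // what `.val()` receives
    let descriptor_bits = field_value << 16; // the field sits at bit offset 16

    // Bits [47:16] of the descriptor reproduce the original 64 KiB-aligned address.
    assert_eq!(descriptor_bits & 0x0000_FFFF_FFFF_0000, output_addr);
}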
/// Constants for indexing the MAIR_EL1.
#[allow(dead_code)]
mod mair {
pub const DEVICE: u64 = 0;
pub const NORMAL: u64 = 1;
}
/// Setup function for the MAIR_EL1 register.
fn set_up_mair() {
// Define the memory types being mapped.
MAIR_EL1.write(
// Attribute 1 - Cacheable normal DRAM
MAIR_EL1::Attr1_HIGH::Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc
+ MAIR_EL1::Attr1_LOW_MEMORY::InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc
// Attribute 0 - Device
+ MAIR_EL1::Attr0_HIGH::Device
+ MAIR_EL1::Attr0_LOW_DEVICE::Device_nGnRE,
);
}
/// Iterates over all static page table entries and fills them at once.
///
/// # Safety
///
/// - Modifies a `static mut`. Ensure it only happens from here.
unsafe fn populate_pt_entries() -> Result<(), &'static str> {
for (l2_nr, l2_entry) in TABLES.lvl2.iter_mut().enumerate() {
*l2_entry = TableDescriptor::new(TABLES.lvl3[l2_nr].base_addr_usize()).into();
for (l3_nr, l3_entry) in TABLES.lvl3[l2_nr].iter_mut().enumerate() {
let virt_addr = (l2_nr << FIVETWELVE_MIB_SHIFT) + (l3_nr << SIXTYFOUR_KIB_SHIFT);
let (output_addr, attribute_fields) =
bsp::virt_mem_layout().get_virt_addr_properties(virt_addr)?;
*l3_entry = PageDescriptor::new(output_addr, attribute_fields).into();
}
}
Ok(())
}
/// Configure various settings of stage 1 of the EL1 translation regime.
fn configure_translation_control() {
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::TG0::KiB_64
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(32), // TTBR0 spans 4 GiB total.
);
}
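// Illustration only: a standalone, hosted sketch (not part of this kernel file) checking
// the T0SZ value programmed above. TTBR0 covers 2^(64 - T0SZ) bytes of virtual address
// space, so T0SZ = 32 yields the 4 GiB span mentioned in the comment.
fn main() {
    let t0sz: u32 = 32;
    let span: u64 = 1u64 << (64 - t0sz);
    assert_eq!(span, 4 * 1024 * 1024 * 1024); // 4 GiB
}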
//--------------------------------------------------------------------------------------------------
// Arch-public
//--------------------------------------------------------------------------------------------------
pub struct MMU;
//--------------------------------------------------------------------------------------------------
// OS interface implementations
//--------------------------------------------------------------------------------------------------
impl interface::mm::MMU for MMU {
/// Compile the page tables from the `BSP`-supplied `virt_mem_layout()`.
///
/// # Safety
///
/// - User must ensure that the hardware supports the parameters being set here.
unsafe fn init(&self) -> Result<(), &'static str> {
// Fail early if translation granule is not supported. Both RPis support it, though.
if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) {
return Err("64 KiB translation granule not supported");
}
// Prepare the memory attribute indirection register.
set_up_mair();
// Populate page tables.
populate_pt_entries()?;
// Set the "Translation Table Base Register".
TTBR0_EL1.set_baddr(TABLES.lvl2.base_addr_u64());
configure_translation_control();
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
barrier::isb(barrier::SY);
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction
barrier::isb(barrier::SY);
Ok(())
}
}

@ -0,0 +1,52 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Synchronization primitives.
use crate::interface;
use core::cell::UnsafeCell;
//--------------------------------------------------------------------------------------------------
// Arch-public
//--------------------------------------------------------------------------------------------------
/// A pseudo-lock for teaching purposes.
///
/// Used to introduce [interior mutability].
///
/// In contrast to a real Mutex implementation, does not protect against concurrent access to the
/// contained data. This part is preserved for later lessons.
///
/// The lock will only be used as long as it is safe to do so, i.e. as long as the kernel is
/// executing single-threaded, aka only running on a single core with interrupts disabled.
///
/// [interior mutability]: https://doc.rust-lang.org/std/cell/index.html
pub struct NullLock<T: ?Sized> {
data: UnsafeCell<T>,
}
unsafe impl<T: ?Sized + Send> Send for NullLock<T> {}
unsafe impl<T: ?Sized + Send> Sync for NullLock<T> {}
impl<T> NullLock<T> {
pub const fn new(data: T) -> NullLock<T> {
NullLock {
data: UnsafeCell::new(data),
}
}
}
//--------------------------------------------------------------------------------------------------
// OS interface implementations
//--------------------------------------------------------------------------------------------------
impl<T> interface::sync::Mutex for &NullLock<T> {
type Data = T;
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
// In a real lock, there would be code encapsulating this line that ensures that this
// mutable reference will only ever be given out once at a time.
f(unsafe { &mut *self.data.get() })
}
}
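// Illustration only: a standalone, hosted sketch (not part of this kernel file) of the
// usage pattern enabled by implementing the Mutex trait for `&NullLock`: callers take a
// mutable *binding* to a shared reference and lock through it. Trait and type names are
// re-declared locally so the sketch is self-contained.
use core::cell::UnsafeCell;

struct NullLock<T> {
    data: UnsafeCell<T>,
}
unsafe impl<T: Send> Send for NullLock<T> {}
unsafe impl<T: Send> Sync for NullLock<T> {}

impl<T> NullLock<T> {
    const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
        }
    }
}

trait Mutex {
    type Data;
    fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}

impl<T> Mutex for &NullLock<T> {
    type Data = T;
    fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R {
        // As above: no real protection, the closure just gets the inner data.
        f(unsafe { &mut *self.data.get() })
    }
}

static COUNTER: NullLock<u32> = NullLock::new(0);

fn main() {
    let mut r = &COUNTER; // the binding is mutable, the static itself is not
    r.lock(|c| *c += 1);
    assert_eq!(r.lock(|c| *c), 1);
}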

@ -0,0 +1,81 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Timer primitives.
use crate::{interface, warn};
use core::time::Duration;
use cortex_a::regs::*;
const NS_PER_S: u64 = 1_000_000_000;
//--------------------------------------------------------------------------------------------------
// Arch-public
//--------------------------------------------------------------------------------------------------
pub struct Timer;
//--------------------------------------------------------------------------------------------------
// OS interface implementations
//--------------------------------------------------------------------------------------------------
impl interface::time::Timer for Timer {
fn resolution(&self) -> Duration {
Duration::from_nanos(NS_PER_S / (CNTFRQ_EL0.get() as u64))
}
fn uptime(&self) -> Duration {
let frq: u64 = CNTFRQ_EL0.get() as u64;
let current_count: u64 = CNTPCT_EL0.get() * NS_PER_S;
Duration::from_nanos(current_count / frq)
}
fn spin_for(&self, duration: Duration) {
// Instantly return on zero.
if duration.as_nanos() == 0 {
return;
}
// Calculate the register compare value.
let frq = CNTFRQ_EL0.get() as u64;
let x = match frq.checked_mul(duration.as_nanos() as u64) {
None => {
warn!("Spin duration too long, skipping");
return;
}
Some(val) => val,
};
let tval = x / NS_PER_S;
// Check if it is within supported bounds.
let warn: Option<&str> = if tval == 0 {
Some("smaller")
} else if tval > u32::max_value().into() {
Some("bigger")
} else {
None
};
if let Some(w) = warn {
warn!(
"Spin duration {} than architecturally supported, skipping",
w
);
return;
}
// Set the compare value register.
CNTP_TVAL_EL0.set(tval as u32);
// Kick off the counting and mask (disable) the timer interrupt.
CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::SET);
// ISTATUS will be '1' when cval ticks have passed. Busy-check it.
while !CNTP_CTL_EL0.matches_all(CNTP_CTL_EL0::ISTATUS::SET) {}
// Disable counting again.
CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::CLEAR);
}
}
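// Illustration only: a standalone, hosted sketch (not part of this kernel file) of the
// compare-value arithmetic used by spin_for() above. The 19.2 MHz counter frequency is an
// assumption that matches a typical Raspberry Pi; the real code reads it from CNTFRQ_EL0.
fn main() {
    const NS_PER_S: u64 = 1_000_000_000;

    let frq: u64 = 19_200_000; // assumed CNTFRQ_EL0 value
    let duration_ns: u64 = NS_PER_S; // spin for one second

    // ticks = frq * ns / NS_PER_S, with the same overflow check as in spin_for().
    let tval = frq.checked_mul(duration_ns).expect("spin duration too long") / NS_PER_S;

    assert_eq!(tval, 19_200_000); // comfortably within the 32-bit CNTP_TVAL_EL0 range
}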

@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Conditional exporting of Board Support Packages.
mod driver;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod rpi;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use rpi::*;

@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Drivers.
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
mod bcm;
#[cfg(any(feature = "bsp_rpi3", feature = "bsp_rpi4"))]
pub use bcm::*;

@ -0,0 +1,11 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! BCM driver top level.
mod bcm2xxx_gpio;
mod bcm2xxx_pl011_uart;
pub use bcm2xxx_gpio::GPIO;
pub use bcm2xxx_pl011_uart::{PL011Uart, PanicUart};

@ -0,0 +1,145 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! GPIO driver.
use crate::{arch, arch::sync::NullLock, interface};
use core::ops;
use register::{mmio::ReadWrite, register_bitfields, register_structs};
// GPIO registers.
//
// Descriptions taken from
// https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
register_bitfields! {
u32,
/// GPIO Function Select 1
GPFSEL1 [
/// Pin 15
FSEL15 OFFSET(15) NUMBITS(3) [
Input = 0b000,
Output = 0b001,
AltFunc0 = 0b100 // PL011 UART RX
],
/// Pin 14
FSEL14 OFFSET(12) NUMBITS(3) [
Input = 0b000,
Output = 0b001,
AltFunc0 = 0b100 // PL011 UART TX
]
],
/// GPIO Pull-up/down Clock Register 0
GPPUDCLK0 [
/// Pin 15
PUDCLK15 OFFSET(15) NUMBITS(1) [
NoEffect = 0,
AssertClock = 1
],
/// Pin 14
PUDCLK14 OFFSET(14) NUMBITS(1) [
NoEffect = 0,
AssertClock = 1
]
]
}
register_structs! {
#[allow(non_snake_case)]
RegisterBlock {
(0x00 => GPFSEL0: ReadWrite<u32>),
(0x04 => GPFSEL1: ReadWrite<u32, GPFSEL1::Register>),
(0x08 => GPFSEL2: ReadWrite<u32>),
(0x0C => GPFSEL3: ReadWrite<u32>),
(0x10 => GPFSEL4: ReadWrite<u32>),
(0x14 => GPFSEL5: ReadWrite<u32>),
(0x18 => _reserved1),
(0x94 => GPPUD: ReadWrite<u32>),
(0x98 => GPPUDCLK0: ReadWrite<u32, GPPUDCLK0::Register>),
(0x9C => GPPUDCLK1: ReadWrite<u32>),
(0xA0 => @END),
}
}
/// The driver's private data.
struct GPIOInner {
base_addr: usize,
}
/// Deref to RegisterBlock.
impl ops::Deref for GPIOInner {
type Target = RegisterBlock;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr() }
}
}
impl GPIOInner {
const fn new(base_addr: usize) -> GPIOInner {
GPIOInner { base_addr }
}
/// Return a pointer to the register block.
fn ptr(&self) -> *const RegisterBlock {
self.base_addr as *const _
}
}
//--------------------------------------------------------------------------------------------------
// BSP-public
//--------------------------------------------------------------------------------------------------
use interface::sync::Mutex;
/// The driver's main struct.
pub struct GPIO {
inner: NullLock<GPIOInner>,
}
impl GPIO {
pub const unsafe fn new(base_addr: usize) -> GPIO {
GPIO {
inner: NullLock::new(GPIOInner::new(base_addr)),
}
}
/// Map PL011 UART as standard output.
///
/// TX to pin 14
/// RX to pin 15
pub fn map_pl011_uart(&self) {
let mut r = &self.inner;
r.lock(|inner| {
// Map to pins.
inner
.GPFSEL1
.modify(GPFSEL1::FSEL14::AltFunc0 + GPFSEL1::FSEL15::AltFunc0);
// Enable pins 14 and 15.
inner.GPPUD.set(0);
arch::spin_for_cycles(150);
inner
.GPPUDCLK0
.write(GPPUDCLK0::PUDCLK14::AssertClock + GPPUDCLK0::PUDCLK15::AssertClock);
arch::spin_for_cycles(150);
inner.GPPUDCLK0.set(0);
})
}
}
//--------------------------------------------------------------------------------------------------
// OS interface implementations
//--------------------------------------------------------------------------------------------------
impl interface::driver::DeviceDriver for GPIO {
fn compatible(&self) -> &str {
"GPIO"
}
}

@ -0,0 +1,327 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! PL011 UART driver.
use crate::{arch, arch::sync::NullLock, interface};
use core::{fmt, ops};
use register::{mmio::*, register_bitfields, register_structs};
// PL011 UART registers.
//
// Descriptions taken from
// https://github.com/raspberrypi/documentation/files/1888662/BCM2837-ARM-Peripherals.-.Revised.-.V2-1.pdf
register_bitfields! {
u32,
/// Flag Register
FR [
/// Transmit FIFO empty. The meaning of this bit depends on the state of the FEN bit in the
/// Line Control Register, UARTLCR_LCRH.
///
/// If the FIFO is disabled, this bit is set when the transmit holding register is empty. If
/// the FIFO is enabled, the TXFE bit is set when the transmit FIFO is empty. This bit does
/// not indicate if there is data in the transmit shift register.
TXFE OFFSET(7) NUMBITS(1) [],
/// Transmit FIFO full. The meaning of this bit depends on the state of the FEN bit in the
/// UARTLCR_LCRH Register.
///
/// If the FIFO is disabled, this bit is set when the transmit holding register is full. If
/// the FIFO is enabled, the TXFF bit is set when the transmit FIFO is full.
TXFF OFFSET(5) NUMBITS(1) [],
/// Receive FIFO empty. The meaning of this bit depends on the state of the FEN bit in the
/// UARTLCR_H Register.
///
/// If the FIFO is disabled, this bit is set when the receive holding register is empty. If
/// the FIFO is enabled, the RXFE bit is set when the receive FIFO is empty.
RXFE OFFSET(4) NUMBITS(1) []
],
/// Integer Baud rate divisor
IBRD [
/// Integer Baud rate divisor
IBRD OFFSET(0) NUMBITS(16) []
],
/// Fractional Baud rate divisor
FBRD [
/// Fractional Baud rate divisor
FBRD OFFSET(0) NUMBITS(6) []
],
/// Line Control register
LCRH [
/// Word length. These bits indicate the number of data bits transmitted or received in a
/// frame.
WLEN OFFSET(5) NUMBITS(2) [
FiveBit = 0b00,
SixBit = 0b01,
SevenBit = 0b10,
EightBit = 0b11
],
/// Enable FIFOs:
///
/// 0 = FIFOs are disabled (character mode), that is, the FIFOs become 1-byte-deep holding
/// registers
///
/// 1 = transmit and receive FIFO buffers are enabled (FIFO mode).
FEN OFFSET(4) NUMBITS(1) [
FifosDisabled = 0,
FifosEnabled = 1
]
],
/// Control Register
CR [
/// Receive enable. If this bit is set to 1, the receive section of the UART is enabled.
/// Data reception occurs for UART signals. When the UART is disabled in the middle of
/// reception, it completes the current character before stopping.
RXE OFFSET(9) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// Transmit enable. If this bit is set to 1, the transmit section of the UART is enabled.
/// Data transmission occurs for UART signals. When the UART is disabled in the middle of
/// transmission, it completes the current character before stopping.
TXE OFFSET(8) NUMBITS(1) [
Disabled = 0,
Enabled = 1
],
/// UART enable
UARTEN OFFSET(0) NUMBITS(1) [
/// If the UART is disabled in the middle of transmission or reception, it completes the
/// current character before stopping.
Disabled = 0,
Enabled = 1
]
],
/// Interrupt Clear Register
ICR [
/// Meta field for all pending interrupts
ALL OFFSET(0) NUMBITS(11) []
]
}
register_structs! {
#[allow(non_snake_case)]
RegisterBlock {
(0x00 => DR: ReadWrite<u32>),
(0x04 => _reserved1),
(0x18 => FR: ReadOnly<u32, FR::Register>),
(0x1c => _reserved2),
(0x24 => IBRD: WriteOnly<u32, IBRD::Register>),
(0x28 => FBRD: WriteOnly<u32, FBRD::Register>),
(0x2c => LCRH: WriteOnly<u32, LCRH::Register>),
(0x30 => CR: WriteOnly<u32, CR::Register>),
(0x34 => _reserved3),
(0x44 => ICR: WriteOnly<u32, ICR::Register>),
(0x48 => @END),
}
}
/// The driver's mutex-protected part.
pub struct PL011UartInner {
base_addr: usize,
chars_written: usize,
}
/// Deref to RegisterBlock.
///
/// Allows writing
/// ```
/// self.DR.read()
/// ```
/// instead of something along the lines of
/// ```
/// unsafe { (*PL011UartInner::ptr()).DR.read() }
/// ```
impl ops::Deref for PL011UartInner {
type Target = RegisterBlock;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr() }
}
}
impl PL011UartInner {
pub const unsafe fn new(base_addr: usize) -> PL011UartInner {
PL011UartInner {
base_addr,
chars_written: 0,
}
}
/// Set up baud rate and characteristics.
///
/// Results in 8N1 and 115200 baud (if the clk has been previously set to 48 MHz by the
/// firmware).
pub fn init(&self) {
// Turn it off temporarily.
self.CR.set(0);
self.ICR.write(ICR::ALL::CLEAR);
self.IBRD.write(IBRD::IBRD.val(26)); // Results in 115200 baud for UART Clk of 48 MHz.
self.FBRD.write(FBRD::FBRD.val(3));
self.LCRH
.write(LCRH::WLEN::EightBit + LCRH::FEN::FifosEnabled); // 8N1 + Fifo on
self.CR
.write(CR::UARTEN::Enabled + CR::TXE::Enabled + CR::RXE::Enabled);
}
/// Return a pointer to the register block.
fn ptr(&self) -> *const RegisterBlock {
self.base_addr as *const _
}
/// Send a character.
fn write_char(&mut self, c: char) {
// Spin while TX FIFO full is set, waiting for an empty slot.
while self.FR.matches_all(FR::TXFF::SET) {
arch::nop();
}
// Write the character to the buffer.
self.DR.set(c as u32);
}
}
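// Illustration only: a standalone, hosted sketch (not part of this kernel file) deriving
// the IBRD/FBRD values programmed in init() above, assuming a 48 MHz UART reference clock.
fn main() {
    let uart_clk = 48_000_000.0_f64;
    let baud = 115_200.0_f64;

    // PL011 divisor: BAUDDIV = UARTCLK / (16 * baud rate), split into an integer part
    // and a 6-bit fractional part.
    let bauddiv = uart_clk / (16.0 * baud); // ~26.042
    let ibrd = bauddiv.floor();
    let fbrd = ((bauddiv - ibrd) * 64.0).round();

    assert_eq!(ibrd as u32, 26);
    assert_eq!(fbrd as u32, 3);
}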
/// Implementing `core::fmt::Write` enables usage of the `format_args!` macros, which in turn are
/// used to implement the `kernel`'s `print!` and `println!` macros. By implementing `write_str()`,
/// we get `write_fmt()` automatically.
///
/// The function takes an `&mut self`, so it must be implemented for the inner struct.
///
/// See [`src/print.rs`].
///
/// [`src/print.rs`]: ../../print/index.html
impl fmt::Write for PL011UartInner {
fn write_str(&mut self, s: &str) -> fmt::Result {
for c in s.chars() {
// Convert newline to carriage return + newline.
if c == '\n' {
self.write_char('\r')
}
self.write_char(c);
}
self.chars_written += s.len();
Ok(())
}
}
//--------------------------------------------------------------------------------------------------
// Export the inner struct so that BSPs can use it for the panic handler
//--------------------------------------------------------------------------------------------------
pub use PL011UartInner as PanicUart;
//--------------------------------------------------------------------------------------------------
// BSP-public
//--------------------------------------------------------------------------------------------------
/// The driver's main struct.
pub struct PL011Uart {
inner: NullLock<PL011UartInner>,
}
impl PL011Uart {
/// # Safety
///
/// The user must ensure to provide the correct `base_addr`.
pub const unsafe fn new(base_addr: usize) -> PL011Uart {
PL011Uart {
inner: NullLock::new(PL011UartInner::new(base_addr)),
}
}
}
//--------------------------------------------------------------------------------------------------
// OS interface implementations
//--------------------------------------------------------------------------------------------------
use interface::sync::Mutex;
impl interface::driver::DeviceDriver for PL011Uart {
fn compatible(&self) -> &str {
"PL011Uart"
}
fn init(&self) -> interface::driver::Result {
let mut r = &self.inner;
r.lock(|inner| inner.init());
Ok(())
}
}
impl interface::console::Write for PL011Uart {
/// Passthrough of `args` to the `core::fmt::Write` implementation, but guarded by a Mutex to
/// serialize access.
fn write_char(&self, c: char) {
let mut r = &self.inner;
r.lock(|inner| inner.write_char(c));
}
fn write_fmt(&self, args: core::fmt::Arguments) -> fmt::Result {
// Fully qualified syntax for the call to `core::fmt::Write::write_fmt()` to increase
// readability.
let mut r = &self.inner;
r.lock(|inner| fmt::Write::write_fmt(inner, args))
}
fn flush(&self) {
let mut r = &self.inner;
// Spin until TX FIFO empty is set.
r.lock(|inner| {
while !inner.FR.matches_all(FR::TXFE::SET) {
arch::nop();
}
});
}
}
impl interface::console::Read for PL011Uart {
fn read_char(&self) -> char {
let mut r = &self.inner;
r.lock(|inner| {
// Spin while RX FIFO empty is set.
while inner.FR.matches_all(FR::RXFE::SET) {
arch::nop();
}
// Read one character.
let mut ret = inner.DR.get() as u8 as char;
// Convert carriage return to newline.
if ret == '\r' {
ret = '\n'
}
ret
})
}
fn clear(&self) {
let mut r = &self.inner;
r.lock(|inner| {
// Read from the RX FIFO until it is indicating empty.
while !inner.FR.matches_all(FR::RXFE::SET) {
inner.DR.get();
}
})
}
}
impl interface::console::Statistics for PL011Uart {
fn chars_written(&self) -> usize {
let mut r = &self.inner;
r.lock(|inner| inner.chars_written)
}
}

@ -0,0 +1,82 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Board Support Package for the Raspberry Pi.
mod memory_map;
mod virt_mem_layout;
use super::driver;
use crate::{interface, memory::KernelVirtualLayout};
use core::fmt;
pub const BOOT_CORE_ID: u64 = 0;
pub const BOOT_CORE_STACK_START: u64 = 0x80_000;
//--------------------------------------------------------------------------------------------------
// Global BSP driver instances
//--------------------------------------------------------------------------------------------------
static GPIO: driver::GPIO = unsafe { driver::GPIO::new(memory_map::mmio::GPIO_BASE) };
static PL011_UART: driver::PL011Uart =
unsafe { driver::PL011Uart::new(memory_map::mmio::PL011_UART_BASE) };
//--------------------------------------------------------------------------------------------------
// Implementation of the kernel's BSP calls
//--------------------------------------------------------------------------------------------------
/// Board identification.
pub fn board_name() -> &'static str {
#[cfg(feature = "bsp_rpi3")]
{
"Raspberry Pi 3"
}
#[cfg(feature = "bsp_rpi4")]
{
"Raspberry Pi 4"
}
}
/// Return a reference to a `console::All` implementation.
pub fn console() -> &'static impl interface::console::All {
&PL011_UART
}
/// In case of a panic, the panic handler uses this function to take a last shot at printing
/// something before the system is halted.
///
/// # Safety
///
/// - Use only for printing during a panic.
pub unsafe fn panic_console_out() -> impl fmt::Write {
let uart = driver::PanicUart::new(memory_map::mmio::PL011_UART_BASE);
uart.init();
uart
}
/// Return an array of references to all `DeviceDriver` compatible `BSP` drivers.
///
/// # Safety
///
/// The order of devices is the order in which `DeviceDriver::init()` is called.
pub fn device_drivers() -> [&'static dyn interface::driver::DeviceDriver; 2] {
[&GPIO, &PL011_UART]
}
/// BSP initialization code that runs after driver init.
pub fn post_driver_init() {
// Configure PL011Uart's output pins.
GPIO.map_pl011_uart();
}
/// Return the address space size in bytes.
pub const fn addr_space_size() -> usize {
memory_map::END_INCLUSIVE + 1
}
/// Return a reference to the virtual memory layout.
pub fn virt_mem_layout() -> &'static KernelVirtualLayout<{ virt_mem_layout::NUM_MEM_RANGES }> {
&virt_mem_layout::LAYOUT
}

@ -0,0 +1,38 @@
/* SPDX-License-Identifier: MIT
*
* Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
*/
SECTIONS
{
/* Set current address to the value from which the RPi starts execution */
. = 0x80000;
__ro_start = .;
.text :
{
*(.text._start) *(.text*)
}
.rodata :
{
*(.rodata*)
}
. = ALIGN(65536); /* Fill up to 64 KiB */
__ro_end = .;
.data :
{
*(.data*)
}
/* Align to 8 byte boundary */
.bss ALIGN(8):
{
__bss_start = .;
*(.bss*);
__bss_end = .;
}
/DISCARD/ : { *(.comment*) }
}

@ -0,0 +1,27 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! The board's memory map.
#[cfg(feature = "bsp_rpi3")]
#[rustfmt::skip]
pub const END_INCLUSIVE: usize = 0x3FFF_FFFF;
#[cfg(feature = "bsp_rpi4")]
#[rustfmt::skip]
pub const END_INCLUSIVE: usize = 0xFFFF_FFFF;
/// Physical devices.
#[rustfmt::skip]
pub mod mmio {
#[cfg(feature = "bsp_rpi3")]
pub const BASE: usize = 0x3F00_0000;
#[cfg(feature = "bsp_rpi4")]
pub const BASE: usize = 0xFE00_0000;
pub const GPIO_BASE: usize = BASE + 0x0020_0000;
pub const PL011_UART_BASE: usize = BASE + 0x0020_1000;
pub const END_INCLUSIVE: usize = super::END_INCLUSIVE;
}
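// Illustration only: a standalone, hosted sketch (not part of this kernel file) spelling
// out the RPi3 peripheral addresses that result from the offsets above.
fn main() {
    const BASE: usize = 0x3F00_0000; // bsp_rpi3

    assert_eq!(BASE + 0x0020_0000, 0x3F20_0000); // GPIO_BASE
    assert_eq!(BASE + 0x0020_1000, 0x3F20_1000); // PL011_UART_BASE
}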

@ -0,0 +1,82 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! The virtual memory layout.
//!
//! The layout must contain only special ranges, aka anything that is _not_ normal cacheable DRAM.
//! It is agnostic of the paging granularity that the architecture's MMU will use.
use super::memory_map;
use crate::memory::*;
use core::ops::RangeInclusive;
//--------------------------------------------------------------------------------------------------
// BSP-public
//--------------------------------------------------------------------------------------------------
pub const NUM_MEM_RANGES: usize = 3;
pub static LAYOUT: KernelVirtualLayout<{ NUM_MEM_RANGES }> = KernelVirtualLayout::new(
memory_map::END_INCLUSIVE,
[
RangeDescriptor {
name: "Kernel code and RO data",
virtual_range: || {
// Using the linker script, we ensure that the RO area is consecutive and 64 KiB
// aligned, and we export the boundaries via symbols:
//
// [__ro_start, __ro_end)
extern "C" {
// The inclusive start of the read-only area, aka the address of the first
// byte of the area.
static __ro_start: u64;
// The exclusive end of the read-only area, aka the address of the first
// byte _after_ the RO area.
static __ro_end: u64;
}
unsafe {
// Notice the subtraction to turn the exclusive end into an inclusive end.
#[allow(clippy::range_minus_one)]
RangeInclusive::new(
&__ro_start as *const _ as usize,
&__ro_end as *const _ as usize - 1,
)
}
},
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadOnly,
execute_never: false,
},
},
RangeDescriptor {
name: "Remapped Device MMIO",
virtual_range: || {
// The last 64 KiB slot in the first 512 MiB
RangeInclusive::new(0x1FFF_0000, 0x1FFF_FFFF)
},
translation: Translation::Offset(memory_map::mmio::BASE + 0x20_0000),
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
RangeDescriptor {
name: "Device MMIO",
virtual_range: || {
RangeInclusive::new(memory_map::mmio::BASE, memory_map::mmio::END_INCLUSIVE)
},
translation: Translation::Identity,
attribute_fields: AttributeFields {
mem_attributes: MemAttributes::Device,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
},
},
],
);
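// Illustration only: a standalone, hosted sketch (not part of this kernel file) of the
// Offset translation used by the "Remapped Device MMIO" descriptor above, with RPi3
// numbers assumed. get_virt_addr_properties() computes: output = offset + (virt - start).
fn main() {
    let mmio_base: usize = 0x3F00_0000; // memory_map::mmio::BASE on the RPi3
    let offset = mmio_base + 0x20_0000; // the Translation::Offset(..) argument
    let range_start: usize = 0x1FFF_0000; // start of the remapped virtual window

    let virt_addr: usize = 0x1FFF_0000;
    let output_addr = offset + (virt_addr - range_start);

    assert_eq!(output_addr, 0x3F20_0000); // lands on the physical GPIO base
}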

@ -0,0 +1,137 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Trait definitions for coupling `kernel` and `BSP` code.
//!
//! ```
//! +-------------------+
//! | Interface (Trait) |
//! | |
//! +--+-------------+--+
//! ^ ^
//! | |
//! | |
//! +----------+--+ +--+----------+
//! | Kernel code | | BSP Code |
//! | | | |
//! +-------------+ +-------------+
//! ```
/// System console operations.
pub mod console {
use core::fmt;
/// Console write functions.
pub trait Write {
fn write_char(&self, c: char);
fn write_fmt(&self, args: fmt::Arguments) -> fmt::Result;
/// Block execution until the last character has been physically put on the TX wire
/// (draining TX buffers/FIFOs, if any).
fn flush(&self);
}
/// Console read functions.
pub trait Read {
fn read_char(&self) -> char {
' '
}
/// Clear RX buffers, if any.
fn clear(&self);
}
/// Console statistics.
pub trait Statistics {
/// Return the number of characters written.
fn chars_written(&self) -> usize {
0
}
/// Return the number of characters read.
fn chars_read(&self) -> usize {
0
}
}
/// Trait alias for a full-fledged console.
pub trait All = Write + Read + Statistics;
}
/// Synchronization primitives.
pub mod sync {
/// Any object implementing this trait guarantees exclusive access to the data contained within
/// the mutex for the duration of the lock.
///
/// The trait follows the [Rust embedded WG's
/// proposal](https://github.com/korken89/wg/blob/master/rfcs/0377-mutex-trait.md) and therefore
/// provides some goodness such as [deadlock
/// prevention](https://github.com/korken89/wg/blob/master/rfcs/0377-mutex-trait.md#design-decisions-and-compatibility).
///
/// # Example
///
/// Since the lock function takes an `&mut self` to enable deadlock-prevention, the trait is
/// best implemented **for a reference to a container struct**, and has a usage pattern that
/// might feel strange at first:
///
/// ```
/// static MUT: Mutex<RefCell<i32>> = Mutex::new(RefCell::new(0));
///
/// fn foo() {
/// let mut r = &MUT; // Note that r is mutable
/// r.lock(|data| *data += 1);
/// }
/// ```
pub trait Mutex {
/// Type of data encapsulated by the mutex.
type Data;
/// Creates a critical section and grants temporary mutable access to the encapsulated data.
fn lock<R>(&mut self, f: impl FnOnce(&mut Self::Data) -> R) -> R;
}
}
/// Driver interfaces.
pub mod driver {
/// Driver result type, e.g. for indicating successful driver init.
pub type Result = core::result::Result<(), ()>;
/// Device Driver functions.
pub trait DeviceDriver {
/// Return a compatibility string for identifying the driver.
fn compatible(&self) -> &str;
/// Called by the kernel to bring up the device.
fn init(&self) -> Result {
Ok(())
}
}
}
/// Timekeeping interfaces.
pub mod time {
use core::time::Duration;
/// Timer functions.
pub trait Timer {
/// The timer's resolution.
fn resolution(&self) -> Duration;
/// The uptime since power-on of the device.
///
/// This includes time consumed by firmware and bootloaders.
fn uptime(&self) -> Duration;
/// Spin for a given duration.
fn spin_for(&self, duration: Duration);
}
}
/// Memory Management interfaces.
pub mod mm {
pub trait MMU {
/// Called by the kernel early during init.
unsafe fn init(&self) -> Result<(), &'static str>;
}
}

@ -0,0 +1,136 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
// Rust embedded logo for `make doc`.
#![doc(html_logo_url = "https://git.io/JeGIp")]
//! The `kernel`
//!
//! The `kernel` is composed by gluing together code from
//!
//! - [Hardware-specific Board Support Packages] (`BSPs`).
//! - [Architecture-specific code].
//! - HW- and architecture-agnostic `kernel` code.
//!
//! using the [`kernel::interface`] traits.
//!
//! [Hardware-specific Board Support Packages]: bsp/index.html
//! [Architecture-specific code]: arch/index.html
//! [`kernel::interface`]: interface/index.html
#![allow(incomplete_features)]
#![feature(const_generics)]
#![feature(format_args_nl)]
#![feature(global_asm)]
#![feature(panic_info_message)]
#![feature(trait_alias)]
#![no_main]
#![no_std]
// Conditionally includes the selected `architecture` code, which provides the `_start()` function,
// the first function to run.
mod arch;
// `_start()` then calls `runtime_init::init()`, which on completion, jumps to `kernel_init()`.
mod runtime_init;
// Conditionally includes the selected `BSP` code.
mod bsp;
mod interface;
mod memory;
mod panic_wait;
mod print;
/// Early init code.
///
/// Concerned with initializing `BSP` and `arch` parts.
///
/// # Safety
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order:
/// - Virtual memory must be activated before the device drivers.
/// - Without it, any atomic operations, e.g. the yet-to-be-introduced spinlocks in the device
/// drivers (which currently employ NullLocks instead of spinlocks), will fail to work on
/// the RPi SoCs.
unsafe fn kernel_init() -> ! {
use interface::mm::MMU;
arch::enable_exception_handling();
if let Err(string) = arch::mmu().init() {
panic!("MMU: {}", string);
}
for i in bsp::device_drivers().iter() {
if let Err(()) = i.init() {
panic!("Error loading driver: {}", i.compatible())
}
}
bsp::post_driver_init();
// println! is usable from here on.
// Transition from unsafe to safe.
kernel_main()
}
/// The main function running after the early init.
fn kernel_main() -> ! {
use core::time::Duration;
use interface::{console::All, time::Timer};
info!("Booting on: {}", bsp::board_name());
info!("MMU online. Special regions:");
bsp::virt_mem_layout().print_layout();
info!(
"Current privilege level: {}",
arch::state::current_privilege_level()
);
info!("Exception handling state:");
arch::state::print_exception_state();
info!(
"Architectural timer resolution: {} ns",
arch::timer().resolution().as_nanos()
);
info!("Drivers loaded:");
for (i, driver) in bsp::device_drivers().iter().enumerate() {
info!(" {}. {}", i + 1, driver.compatible());
}
info!("Timer test, spinning for 1 second");
arch::timer().spin_for(Duration::from_secs(1));
// Cause an exception by accessing a virtual address for which no translation was set up. This
// code accesses the address 8 GiB, which is outside the mapped address space.
//
// For demo purposes, the exception handler will catch the faulting 8 GiB address and allow
// execution to continue.
info!("");
info!("Trying to write to address 8 GiB...");
let mut big_addr: u64 = 8 * 1024 * 1024 * 1024;
unsafe { core::ptr::read_volatile(big_addr as *mut u64) };
info!("************************************************");
info!("Whoa! We recovered from a synchronous exception!");
info!("************************************************");
info!("");
info!("Let's try again");
// Now use address 9 GiB. The exception handler won't forgive us this time.
info!("Trying to write to address 9 GiB...");
big_addr = 9 * 1024 * 1024 * 1024;
unsafe { core::ptr::read_volatile(big_addr as *mut u64) };
// Will never reach here in this tutorial.
info!("Echoing input now");
loop {
let c = bsp::console().read_char();
bsp::console().write_char(c);
}
}

@ -0,0 +1,147 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Memory Management.
use core::{fmt, ops::RangeInclusive};
#[derive(Copy, Clone)]
pub enum Translation {
Identity,
Offset(usize),
}
#[derive(Copy, Clone)]
pub enum MemAttributes {
CacheableDRAM,
Device,
}
#[derive(Copy, Clone)]
pub enum AccessPermissions {
ReadOnly,
ReadWrite,
}
#[derive(Copy, Clone)]
pub struct AttributeFields {
pub mem_attributes: MemAttributes,
pub acc_perms: AccessPermissions,
pub execute_never: bool,
}
impl Default for AttributeFields {
fn default() -> AttributeFields {
AttributeFields {
mem_attributes: MemAttributes::CacheableDRAM,
acc_perms: AccessPermissions::ReadWrite,
execute_never: true,
}
}
}
/// An architecture agnostic descriptor for a memory range.
pub struct RangeDescriptor {
pub name: &'static str,
pub virtual_range: fn() -> RangeInclusive<usize>,
pub translation: Translation,
pub attribute_fields: AttributeFields,
}
/// Human-readable output of a RangeDescriptor.
impl fmt::Display for RangeDescriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Call the function to which self.virtual_range points, and dereference the result, which causes
// Rust to copy the value.
let start = *(self.virtual_range)().start();
let end = *(self.virtual_range)().end();
let size = end - start + 1;
// log2(1024).
const KIB_RSHIFT: u32 = 10;
// log2(1024 * 1024).
const MIB_RSHIFT: u32 = 20;
let (size, unit) = if (size >> MIB_RSHIFT) > 0 {
(size >> MIB_RSHIFT, "MiB")
} else if (size >> KIB_RSHIFT) > 0 {
(size >> KIB_RSHIFT, "KiB")
} else {
(size, "Byte")
};
let attr = match self.attribute_fields.mem_attributes {
MemAttributes::CacheableDRAM => "C",
MemAttributes::Device => "Dev",
};
let acc_p = match self.attribute_fields.acc_perms {
AccessPermissions::ReadOnly => "RO",
AccessPermissions::ReadWrite => "RW",
};
let xn = if self.attribute_fields.execute_never {
"PXN"
} else {
"PX"
};
write!(
f,
" {:#010x} - {:#010x} | {: >3} {} | {: <3} {} {: <3} | {}",
start, end, size, unit, attr, acc_p, xn, self.name
)
}
}
/// Type for expressing the kernel's virtual memory layout.
pub struct KernelVirtualLayout<const NUM_SPECIAL_RANGES: usize> {
max_virt_addr_inclusive: usize,
inner: [RangeDescriptor; NUM_SPECIAL_RANGES],
}
impl<const NUM_SPECIAL_RANGES: usize> KernelVirtualLayout<{ NUM_SPECIAL_RANGES }> {
pub const fn new(max: usize, layout: [RangeDescriptor; NUM_SPECIAL_RANGES]) -> Self {
Self {
max_virt_addr_inclusive: max,
inner: layout,
}
}
/// For a virtual address, find and return the output address and corresponding attributes.
///
/// If the address is not found in `inner`, return an identity mapped default with normal
/// cacheable DRAM attributes.
pub fn get_virt_addr_properties(
&self,
virt_addr: usize,
) -> Result<(usize, AttributeFields), &'static str> {
if virt_addr > self.max_virt_addr_inclusive {
return Err("Address out of range");
}
for i in self.inner.iter() {
if (i.virtual_range)().contains(&virt_addr) {
let output_addr = match i.translation {
Translation::Identity => virt_addr,
Translation::Offset(a) => a + (virt_addr - (i.virtual_range)().start()),
};
return Ok((output_addr, i.attribute_fields));
}
}
Ok((virt_addr, AttributeFields::default()))
}
/// Print the memory layout.
pub fn print_layout(&self) {
use crate::info;
for i in self.inner.iter() {
info!("{}", i);
}
}
}

@ -0,0 +1,35 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! A panic handler that infinitely waits.
use crate::{arch, bsp};
use core::{fmt, panic::PanicInfo};
fn _panic_print(args: fmt::Arguments) {
use fmt::Write;
unsafe { bsp::panic_console_out().write_fmt(args).unwrap() };
}
/// Prints with a newline - only use from the panic handler.
///
/// Carbon copy from https://doc.rust-lang.org/src/std/macros.rs.html
#[macro_export]
macro_rules! panic_println {
($($arg:tt)*) => ({
_panic_print(format_args_nl!($($arg)*));
})
}
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
if let Some(args) = info.message() {
panic_println!("Kernel panic: {}", args);
} else {
panic_println!("Kernel panic!");
}
arch::wait_forever()
}

@ -0,0 +1,101 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Printing facilities.
use crate::{bsp, interface};
use core::fmt;
pub fn _print(args: fmt::Arguments) {
use interface::console::Write;
bsp::console().write_fmt(args).unwrap();
}
/// Prints without a newline.
///
/// Carbon copy from https://doc.rust-lang.org/src/std/macros.rs.html
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ($crate::print::_print(format_args!($($arg)*)));
}
/// Prints with a newline.
///
/// Carbon copy from https://doc.rust-lang.org/src/std/macros.rs.html
#[macro_export]
macro_rules! println {
() => ($crate::print!("\n"));
($($arg:tt)*) => ({
$crate::print::_print(format_args_nl!($($arg)*));
})
}
/// Prints an info message, with a newline.
#[macro_export]
macro_rules! info {
($string:expr) => ({
#[allow(unused_imports)]
use crate::interface::time::Timer;
let timestamp = $crate::arch::timer().uptime();
let timestamp_subsec_us = timestamp.subsec_micros();
$crate::print::_print(format_args_nl!(
concat!("[ {:>3}.{:03}{:03}] ", $string),
timestamp.as_secs(),
timestamp_subsec_us / 1_000,
timestamp_subsec_us % 1_000
));
});
($format_string:expr, $($arg:tt)*) => ({
#[allow(unused_imports)]
use crate::interface::time::Timer;
let timestamp = $crate::arch::timer().uptime();
let timestamp_subsec_us = timestamp.subsec_micros();
$crate::print::_print(format_args_nl!(
concat!("[ {:>3}.{:03}{:03}] ", $format_string),
timestamp.as_secs(),
timestamp_subsec_us / 1_000,
timestamp_subsec_us % 1_000,
$($arg)*
));
})
}
/// Prints a warning, with a newline.
#[macro_export]
macro_rules! warn {
($string:expr) => ({
#[allow(unused_imports)]
use crate::interface::time::Timer;
let timestamp = $crate::arch::timer().uptime();
let timestamp_subsec_us = timestamp.subsec_micros();
$crate::print::_print(format_args_nl!(
concat!("[W {:>3}.{:03}{:03}] ", $string),
timestamp.as_secs(),
timestamp_subsec_us / 1_000,
timestamp_subsec_us % 1_000
));
});
($format_string:expr, $($arg:tt)*) => ({
#[allow(unused_imports)]
use crate::interface::time::Timer;
let timestamp = $crate::arch::timer().uptime();
let timestamp_subsec_us = timestamp.subsec_micros();
$crate::print::_print(format_args_nl!(
concat!("[W {:>3}.{:03}{:03}] ", $format_string),
timestamp.as_secs(),
timestamp_subsec_us / 1_000,
timestamp_subsec_us % 1_000,
$($arg)*
));
})
}
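// Illustration only: a standalone, hosted sketch (not part of this kernel file) of the
// timestamp formatting used by info!/warn! above: the sub-second microseconds are split
// into a millisecond part and a microsecond part, each printed with three digits.
fn main() {
    let timestamp_subsec_us: u32 = 234_567; // e.g. 0.234567 s into the current second

    let ms = timestamp_subsec_us / 1_000; // 234
    let us = timestamp_subsec_us % 1_000; // 567

    assert_eq!(format!("[ {:>3}.{:03}{:03}]", 1, ms, us), "[   1.234567]");
}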

@ -0,0 +1,24 @@
// SPDX-License-Identifier: MIT
//
// Copyright (c) 2018-2019 Andre Richter <andre.o.richter@gmail.com>
//! Rust runtime initialization code.
/// Equivalent to `crt0` or `c0` code in the C/C++ world. Clears the `bss` section, then jumps to kernel
/// init code.
///
/// # Safety
///
/// - Only a single core must be active and running this function.
pub unsafe fn init() -> ! {
extern "C" {
// Boundaries of the .bss section, provided by the linker script.
static mut __bss_start: u64;
static mut __bss_end: u64;
}
// Zero out the .bss section.
r0::zero_bss(&mut __bss_start, &mut __bss_end);
crate::kernel_init()
}