From fa3d640b118eaec94ecffb2d08e2945c443afca6 Mon Sep 17 00:00:00 2001 From: Andre Richter Date: Fri, 6 May 2022 22:26:37 +0200 Subject: [PATCH] Rename PageAllocator module --- 14_virtual_mem_part2_mmio_remap/README.md | 171 +++++++++--------- .../kernel/src/memory/mmu.rs | 8 +- .../memory/mmu/{alloc.rs => page_alloc.rs} | 2 +- .../README.md | 2 +- .../kernel/src/memory/mmu.rs | 6 +- .../memory/mmu/{alloc.rs => page_alloc.rs} | 2 +- .../kernel/src/memory/mmu.rs | 6 +- .../memory/mmu/{alloc.rs => page_alloc.rs} | 2 +- 17_kernel_symbols/kernel/src/memory/mmu.rs | 6 +- .../memory/mmu/{alloc.rs => page_alloc.rs} | 2 +- 18_backtrace/kernel/src/memory/mmu.rs | 6 +- 18_backtrace/kernel/src/memory/mmu/alloc.rs | 70 ------- .../kernel/src/memory/mmu/page_alloc.rs | 70 +++++++ 13 files changed, 177 insertions(+), 176 deletions(-) rename 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/{alloc.rs => page_alloc.rs} (99%) rename 15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/{alloc.rs => page_alloc.rs} (99%) rename 16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/{alloc.rs => page_alloc.rs} (99%) rename 17_kernel_symbols/kernel/src/memory/mmu/{alloc.rs => page_alloc.rs} (99%) delete mode 100644 18_backtrace/kernel/src/memory/mmu/alloc.rs create mode 100644 18_backtrace/kernel/src/memory/mmu/page_alloc.rs diff --git a/14_virtual_mem_part2_mmio_remap/README.md b/14_virtual_mem_part2_mmio_remap/README.md index 329e2105..5d78e237 100644 --- a/14_virtual_mem_part2_mmio_remap/README.md +++ b/14_virtual_mem_part2_mmio_remap/README.md @@ -279,7 +279,7 @@ pub unsafe fn kernel_map_mmio( // omitted let virt_region = - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; kernel_map_at_unchecked( name, @@ -296,10 +296,11 @@ pub unsafe fn kernel_map_mmio( } ``` -This allocator is defined and implemented in the added file 
`src/memory/mmu/alloc.rs`. Like other -parts of the mapping code, its implementation makes use of the newly introduced `PageAddress` -and `MemoryRegion` types (in [`src/memory/mmu/types.rs`](kernel/src/memory/mmu/types.rs)), -but apart from that is rather straight forward. Therefore, it won't be covered in details here. +This allocator is defined and implemented in the added file `src/memory/mmu/page_alloc.rs`. Like +other parts of the mapping code, its implementation makes use of the newly introduced +`PageAddress` and `MemoryRegion` types (in +[`src/memory/mmu/types.rs`](kernel/src/memory/mmu/types.rs)), but apart from that is rather straight +forward. Therefore, it won't be covered in details here. The more interesting question is: How does the allocator get to learn which VAs it can use? @@ -313,7 +314,7 @@ been turned on. fn kernel_init_mmio_va_allocator() { let region = bsp::memory::mmu::virt_mmio_remap_region(); - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); } ``` @@ -2227,81 +2228,6 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/main.rs 14_virtual_mem_ let (_, privilege_level) = exception::current_privilege_level(); info!("Current privilege level: {}", privilege_level); -diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/alloc.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs ---- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/alloc.rs -+++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs -@@ -0,0 +1,70 @@ -+// SPDX-License-Identifier: MIT OR Apache-2.0 -+// -+// Copyright (c) 2021-2022 Andre Richter -+ -+//! Allocation. 
-+ -+use super::MemoryRegion; -+use crate::{ -+ memory::{AddressType, Virtual}, -+ synchronization::IRQSafeNullLock, -+ warn, -+}; -+use core::num::NonZeroUsize; -+ -+//-------------------------------------------------------------------------------------------------- -+// Public Definitions -+//-------------------------------------------------------------------------------------------------- -+ -+/// A page allocator that can be lazyily initialized. -+pub struct PageAllocator { -+ pool: Option>, -+} -+ -+//-------------------------------------------------------------------------------------------------- -+// Global instances -+//-------------------------------------------------------------------------------------------------- -+ -+static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock> = -+ IRQSafeNullLock::new(PageAllocator::new()); -+ -+//-------------------------------------------------------------------------------------------------- -+// Public Code -+//-------------------------------------------------------------------------------------------------- -+ -+/// Return a reference to the kernel's MMIO virtual address allocator. -+pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock> { -+ &KERNEL_MMIO_VA_ALLOCATOR -+} -+ -+impl PageAllocator { -+ /// Create an instance. -+ pub const fn new() -> Self { -+ Self { pool: None } -+ } -+ -+ /// Initialize the allocator. -+ pub fn initialize(&mut self, pool: MemoryRegion) { -+ if self.pool.is_some() { -+ warn!("Already initialized"); -+ return; -+ } -+ -+ self.pool = Some(pool); -+ } -+ -+ /// Allocate a number of pages. 
-+ pub fn alloc( -+ &mut self, -+ num_requested_pages: NonZeroUsize, -+ ) -> Result, &'static str> { -+ if self.pool.is_none() { -+ return Err("Allocator not initialized"); -+ } -+ -+ self.pool -+ .as_mut() -+ .unwrap() -+ .take_first_n_pages(num_requested_pages) -+ } -+} - diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/mapping_record.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/mapping_record.rs --- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/mapping_record.rs +++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/mapping_record.rs @@ -2540,6 +2466,81 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/mapping_reco + KERNEL_MAPPING_RECORD.read(|mr| mr.print()); +} +diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/page_alloc.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs +--- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/page_alloc.rs ++++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs +@@ -0,0 +1,70 @@ ++// SPDX-License-Identifier: MIT OR Apache-2.0 ++// ++// Copyright (c) 2021-2022 Andre Richter ++ ++//! Page allocation. ++ ++use super::MemoryRegion; ++use crate::{ ++ memory::{AddressType, Virtual}, ++ synchronization::IRQSafeNullLock, ++ warn, ++}; ++use core::num::NonZeroUsize; ++ ++//-------------------------------------------------------------------------------------------------- ++// Public Definitions ++//-------------------------------------------------------------------------------------------------- ++ ++/// A page allocator that can be lazyily initialized. 
++pub struct PageAllocator { ++ pool: Option>, ++} ++ ++//-------------------------------------------------------------------------------------------------- ++// Global instances ++//-------------------------------------------------------------------------------------------------- ++ ++static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock> = ++ IRQSafeNullLock::new(PageAllocator::new()); ++ ++//-------------------------------------------------------------------------------------------------- ++// Public Code ++//-------------------------------------------------------------------------------------------------- ++ ++/// Return a reference to the kernel's MMIO virtual address allocator. ++pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock> { ++ &KERNEL_MMIO_VA_ALLOCATOR ++} ++ ++impl PageAllocator { ++ /// Create an instance. ++ pub const fn new() -> Self { ++ Self { pool: None } ++ } ++ ++ /// Initialize the allocator. ++ pub fn initialize(&mut self, pool: MemoryRegion) { ++ if self.pool.is_some() { ++ warn!("Already initialized"); ++ return; ++ } ++ ++ self.pool = Some(pool); ++ } ++ ++ /// Allocate a number of pages. 
++ pub fn alloc( ++ &mut self, ++ num_requested_pages: NonZeroUsize, ++ ) -> Result, &'static str> { ++ if self.pool.is_none() { ++ return Err("Allocator not initialized"); ++ } ++ ++ self.pool ++ .as_mut() ++ .unwrap() ++ .take_first_n_pages(num_requested_pages) ++ } ++} + diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/translation_table.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/translation_table.rs --- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/translation_table.rs +++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/translation_table.rs @@ -3037,8 +3038,8 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua #[path = "../_arch/aarch64/memory/mmu.rs"] mod arch_mmu; -+mod alloc; +mod mapping_record; ++mod page_alloc; mod translation_table; +mod types; @@ -3140,7 +3141,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua +fn kernel_init_mmio_va_allocator() { + let region = bsp::memory::mmu::virt_mmio_remap_region(); + -+ alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); ++ page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); +} + +/// Map a region in the kernel's translation tables. 
@@ -3280,7 +3281,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua - "PX" - }; + let virt_region = -+ alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; ++ page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; - write!( - f, @@ -3399,7 +3400,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua + let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr); + + let num_pages = NonZeroUsize::new(phys_region.num_pages()).unwrap(); -+ let virt_region = alloc::kernel_mmio_va_allocator() ++ let virt_region = page_alloc::kernel_mmio_va_allocator() + .lock(|allocator| allocator.alloc(num_pages)) + .unwrap(); diff --git a/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu.rs b/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu.rs index b0616f88..43602470 100644 --- a/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu.rs +++ b/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu.rs @@ -8,8 +8,8 @@ #[path = "../_arch/aarch64/memory/mmu.rs"] mod arch_mmu; -mod alloc; mod mapping_record; +mod page_alloc; mod translation_table; mod types; @@ -81,7 +81,7 @@ use translation_table::interface::TranslationTable; fn kernel_init_mmio_va_allocator() { let region = bsp::memory::mmu::virt_mmio_remap_region(); - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); } /// Map a region in the kernel's translation tables. 
@@ -205,7 +205,7 @@ pub unsafe fn kernel_map_mmio( }; let virt_region = - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; kernel_map_at_unchecked( name, @@ -281,7 +281,7 @@ mod tests { let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr); let num_pages = NonZeroUsize::new(phys_region.num_pages()).unwrap(); - let virt_region = alloc::kernel_mmio_va_allocator() + let virt_region = page_alloc::kernel_mmio_va_allocator() .lock(|allocator| allocator.alloc(num_pages)) .unwrap(); diff --git a/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs b/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs similarity index 99% rename from 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs rename to 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs index aadb72ef..b4c4232c 100644 --- a/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs +++ b/14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs @@ -2,7 +2,7 @@ // // Copyright (c) 2021-2022 Andre Richter -//! Allocation. +//! Page allocation. 
use super::MemoryRegion; use crate::{ diff --git a/15_virtual_mem_part3_precomputed_tables/README.md b/15_virtual_mem_part3_precomputed_tables/README.md index 5836e12f..d3ec21e2 100644 --- a/15_virtual_mem_part3_precomputed_tables/README.md +++ b/15_virtual_mem_part3_precomputed_tables/README.md @@ -1663,7 +1663,7 @@ diff -uNr 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu.rs 15_virtual_me - let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr); - - let num_pages = NonZeroUsize::new(phys_region.num_pages()).unwrap(); -- let virt_region = alloc::kernel_mmio_va_allocator() +- let virt_region = page_alloc::kernel_mmio_va_allocator() - .lock(|allocator| allocator.alloc(num_pages)) - .unwrap(); - diff --git a/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu.rs b/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu.rs index 23dc7094..c6461474 100644 --- a/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu.rs +++ b/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu.rs @@ -8,8 +8,8 @@ #[path = "../_arch/aarch64/memory/mmu.rs"] mod arch_mmu; -mod alloc; mod mapping_record; +mod page_alloc; mod translation_table; mod types; @@ -82,7 +82,7 @@ use translation_table::interface::TranslationTable; fn kernel_init_mmio_va_allocator() { let region = bsp::memory::mmu::virt_mmio_remap_region(); - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); } /// Map a region in the kernel's translation tables. 
@@ -203,7 +203,7 @@ pub unsafe fn kernel_map_mmio( }; let virt_region = - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; kernel_map_at_unchecked( name, diff --git a/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/alloc.rs b/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/page_alloc.rs similarity index 99% rename from 15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/alloc.rs rename to 15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/page_alloc.rs index aadb72ef..b4c4232c 100644 --- a/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/alloc.rs +++ b/15_virtual_mem_part3_precomputed_tables/kernel/src/memory/mmu/page_alloc.rs @@ -2,7 +2,7 @@ // // Copyright (c) 2021-2022 Andre Richter -//! Allocation. +//! Page allocation. use super::MemoryRegion; use crate::{ diff --git a/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu.rs b/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu.rs index dfc29993..8806a993 100644 --- a/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu.rs +++ b/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu.rs @@ -8,8 +8,8 @@ #[path = "../_arch/aarch64/memory/mmu.rs"] mod arch_mmu; -mod alloc; mod mapping_record; +mod page_alloc; mod translation_table; mod types; @@ -87,7 +87,7 @@ use translation_table::interface::TranslationTable; fn kernel_init_mmio_va_allocator() { let region = bsp::memory::mmu::virt_mmio_remap_region(); - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); } /// Map a region in the kernel's translation tables. 
@@ -208,7 +208,7 @@ pub unsafe fn kernel_map_mmio( }; let virt_region = - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; kernel_map_at_unchecked( name, diff --git a/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/alloc.rs b/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/page_alloc.rs similarity index 99% rename from 16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/alloc.rs rename to 16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/page_alloc.rs index aadb72ef..b4c4232c 100644 --- a/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/alloc.rs +++ b/16_virtual_mem_part4_higher_half_kernel/kernel/src/memory/mmu/page_alloc.rs @@ -2,7 +2,7 @@ // // Copyright (c) 2021-2022 Andre Richter -//! Allocation. +//! Page allocation. use super::MemoryRegion; use crate::{ diff --git a/17_kernel_symbols/kernel/src/memory/mmu.rs b/17_kernel_symbols/kernel/src/memory/mmu.rs index dfc29993..8806a993 100644 --- a/17_kernel_symbols/kernel/src/memory/mmu.rs +++ b/17_kernel_symbols/kernel/src/memory/mmu.rs @@ -8,8 +8,8 @@ #[path = "../_arch/aarch64/memory/mmu.rs"] mod arch_mmu; -mod alloc; mod mapping_record; +mod page_alloc; mod translation_table; mod types; @@ -87,7 +87,7 @@ use translation_table::interface::TranslationTable; fn kernel_init_mmio_va_allocator() { let region = bsp::memory::mmu::virt_mmio_remap_region(); - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); } /// Map a region in the kernel's translation tables. 
@@ -208,7 +208,7 @@ pub unsafe fn kernel_map_mmio( }; let virt_region = - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; kernel_map_at_unchecked( name, diff --git a/17_kernel_symbols/kernel/src/memory/mmu/alloc.rs b/17_kernel_symbols/kernel/src/memory/mmu/page_alloc.rs similarity index 99% rename from 17_kernel_symbols/kernel/src/memory/mmu/alloc.rs rename to 17_kernel_symbols/kernel/src/memory/mmu/page_alloc.rs index aadb72ef..b4c4232c 100644 --- a/17_kernel_symbols/kernel/src/memory/mmu/alloc.rs +++ b/17_kernel_symbols/kernel/src/memory/mmu/page_alloc.rs @@ -2,7 +2,7 @@ // // Copyright (c) 2021-2022 Andre Richter -//! Allocation. +//! Page allocation. use super::MemoryRegion; use crate::{ diff --git a/18_backtrace/kernel/src/memory/mmu.rs b/18_backtrace/kernel/src/memory/mmu.rs index dfc29993..8806a993 100644 --- a/18_backtrace/kernel/src/memory/mmu.rs +++ b/18_backtrace/kernel/src/memory/mmu.rs @@ -8,8 +8,8 @@ #[path = "../_arch/aarch64/memory/mmu.rs"] mod arch_mmu; -mod alloc; mod mapping_record; +mod page_alloc; mod translation_table; mod types; @@ -87,7 +87,7 @@ use translation_table::interface::TranslationTable; fn kernel_init_mmio_va_allocator() { let region = bsp::memory::mmu::virt_mmio_remap_region(); - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region)); } /// Map a region in the kernel's translation tables. 
@@ -208,7 +208,7 @@ pub unsafe fn kernel_map_mmio( }; let virt_region = - alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; + page_alloc::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?; kernel_map_at_unchecked( name, diff --git a/18_backtrace/kernel/src/memory/mmu/alloc.rs b/18_backtrace/kernel/src/memory/mmu/alloc.rs deleted file mode 100644 index aadb72ef..00000000 --- a/18_backtrace/kernel/src/memory/mmu/alloc.rs +++ /dev/null @@ -1,70 +0,0 @@ -// SPDX-License-Identifier: MIT OR Apache-2.0 -// -// Copyright (c) 2021-2022 Andre Richter - -//! Allocation. - -use super::MemoryRegion; -use crate::{ - memory::{AddressType, Virtual}, - synchronization::IRQSafeNullLock, - warn, -}; -use core::num::NonZeroUsize; - -//-------------------------------------------------------------------------------------------------- -// Public Definitions -//-------------------------------------------------------------------------------------------------- - -/// A page allocator that can be lazyily initialized. -pub struct PageAllocator { - pool: Option>, -} - -//-------------------------------------------------------------------------------------------------- -// Global instances -//-------------------------------------------------------------------------------------------------- - -static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock> = - IRQSafeNullLock::new(PageAllocator::new()); - -//-------------------------------------------------------------------------------------------------- -// Public Code -//-------------------------------------------------------------------------------------------------- - -/// Return a reference to the kernel's MMIO virtual address allocator. -pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock> { - &KERNEL_MMIO_VA_ALLOCATOR -} - -impl PageAllocator { - /// Create an instance. - pub const fn new() -> Self { - Self { pool: None } - } - - /// Initialize the allocator. 
- pub fn initialize(&mut self, pool: MemoryRegion) { - if self.pool.is_some() { - warn!("Already initialized"); - return; - } - - self.pool = Some(pool); - } - - /// Allocate a number of pages. - pub fn alloc( - &mut self, - num_requested_pages: NonZeroUsize, - ) -> Result, &'static str> { - if self.pool.is_none() { - return Err("Allocator not initialized"); - } - - self.pool - .as_mut() - .unwrap() - .take_first_n_pages(num_requested_pages) - } -} diff --git a/18_backtrace/kernel/src/memory/mmu/page_alloc.rs b/18_backtrace/kernel/src/memory/mmu/page_alloc.rs new file mode 100644 index 00000000..b4c4232c --- /dev/null +++ b/18_backtrace/kernel/src/memory/mmu/page_alloc.rs @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2021-2022 Andre Richter + +//! Page allocation. + +use super::MemoryRegion; +use crate::{ + memory::{AddressType, Virtual}, + synchronization::IRQSafeNullLock, + warn, +}; +use core::num::NonZeroUsize; + +//-------------------------------------------------------------------------------------------------- +// Public Definitions +//-------------------------------------------------------------------------------------------------- + +/// A page allocator that can be lazyily initialized. +pub struct PageAllocator { + pool: Option>, +} + +//-------------------------------------------------------------------------------------------------- +// Global instances +//-------------------------------------------------------------------------------------------------- + +static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock> = + IRQSafeNullLock::new(PageAllocator::new()); + +//-------------------------------------------------------------------------------------------------- +// Public Code +//-------------------------------------------------------------------------------------------------- + +/// Return a reference to the kernel's MMIO virtual address allocator. 
+pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock> { + &KERNEL_MMIO_VA_ALLOCATOR +} + +impl PageAllocator { + /// Create an instance. + pub const fn new() -> Self { + Self { pool: None } + } + + /// Initialize the allocator. + pub fn initialize(&mut self, pool: MemoryRegion) { + if self.pool.is_some() { + warn!("Already initialized"); + return; + } + + self.pool = Some(pool); + } + + /// Allocate a number of pages. + pub fn alloc( + &mut self, + num_requested_pages: NonZeroUsize, + ) -> Result, &'static str> { + if self.pool.is_none() { + return Err("Allocator not initialized"); + } + + self.pool + .as_mut() + .unwrap() + .take_first_n_pages(num_requested_pages) + } +}