From 2c917ba3835be39669f3f3b7173eeeb8c3d5922f Mon Sep 17 00:00:00 2001
From: Zhang Junyang
Date: Tue, 29 Apr 2025 18:16:06 +0800
Subject: [PATCH] Unify page table template parameters

---
 ostd/src/arch/riscv/mm/mod.rs                 |   1 +
 .../x86/iommu/dma_remapping/context_table.rs  |  10 +-
 ostd/src/arch/x86/iommu/dma_remapping/mod.rs  |   4 +-
 .../x86/iommu/dma_remapping/second_stage.rs   |  19 +-
 ostd/src/arch/x86/mm/mod.rs                   |   1 +
 ostd/src/lib.rs                               |   3 +-
 ostd/src/mm/kspace/mod.rs                     |  17 +-
 ostd/src/mm/mod.rs                            |  15 +-
 ostd/src/mm/page_table/cursor/locking.rs      |  48 ++---
 ostd/src/mm/page_table/cursor/mod.rs          |  64 +++---
 ostd/src/mm/page_table/mod.rs                 | 186 ++++++++++++------
 ostd/src/mm/page_table/node/child.rs          |  27 +--
 ostd/src/mm/page_table/node/entry.rs          |  33 ++--
 ostd/src/mm/page_table/node/mod.rs            |  65 +++---
 ostd/src/mm/page_table/test.rs                | 108 +++++-----
 ostd/src/mm/vm_space.rs                       |  19 +-
 16 files changed, 364 insertions(+), 256 deletions(-)

diff --git a/ostd/src/arch/riscv/mm/mod.rs b/ostd/src/arch/riscv/mm/mod.rs
index d0544d4fb..ecab73b58 100644
--- a/ostd/src/arch/riscv/mm/mod.rs
+++ b/ostd/src/arch/riscv/mm/mod.rs
@@ -22,6 +22,7 @@ impl PagingConstsTrait for PagingConsts {
     const BASE_PAGE_SIZE: usize = 4096;
     const NR_LEVELS: PagingLevel = 4;
     const ADDRESS_WIDTH: usize = 48;
+    const VA_SIGN_EXT: bool = true;
     const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 4;
     const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
 }

diff --git a/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs b/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs
index 8590c3c3e..67c9c2d6e 100644
--- a/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs
+++ b/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs
@@ -8,7 +8,7 @@ use core::mem::size_of;
 use log::trace;
 use ostd_pod::Pod;
 
-use super::second_stage::{DeviceMode, PageTableEntry, PagingConsts};
+use super::second_stage::IommuPtConfig;
 use crate::{
     bus::pci::PciDeviceLocation,
     mm::{
@@ -107,7 +107,7 @@ impl RootTable {
     pub(super) fn specify_device_page_table(
         &mut self,
         device_id: PciDeviceLocation,
-        page_table: PageTable<DeviceMode, PageTableEntry, PagingConsts>,
+        page_table: PageTable<IommuPtConfig>,
     ) {
         let context_table = self.get_or_create_context_table(device_id);
 
@@ -241,7 +241,7 @@ pub enum AddressWidth {
 pub struct ContextTable {
     /// Total 32 devices, each device has 8 functions.
     entries_frame: Frame<()>,
-    page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
+    page_tables: BTreeMap<Paddr, PageTable<IommuPtConfig>>,
 }
 
 impl ContextTable {
@@ -259,7 +259,7 @@ impl ContextTable {
     fn get_or_create_page_table(
         &mut self,
         device: PciDeviceLocation,
-    ) -> &mut PageTable<DeviceMode, PageTableEntry, PagingConsts> {
+    ) -> &mut PageTable<IommuPtConfig> {
         let bus_entry = self
             .entries_frame
             .read_val::<ContextEntry>(
             )
             .unwrap();
 
         if !bus_entry.is_present() {
-            let table = PageTable::<DeviceMode, PageTableEntry, PagingConsts>::empty();
+            let table = PageTable::<IommuPtConfig>::empty();
             let address = table.root_paddr();
             self.page_tables.insert(address, table);
             let entry = ContextEntry(address as u128 | 3 | 0x1_0000_0000_0000_0000);

diff --git a/ostd/src/arch/x86/iommu/dma_remapping/mod.rs b/ostd/src/arch/x86/iommu/dma_remapping/mod.rs
index 64e5fe0bd..5093945d2 100644
--- a/ostd/src/arch/x86/iommu/dma_remapping/mod.rs
+++ b/ostd/src/arch/x86/iommu/dma_remapping/mod.rs
@@ -2,7 +2,7 @@
 
 pub use context_table::RootTable;
 use log::{info, warn};
-use second_stage::{DeviceMode, PageTableEntry, PagingConsts};
+use second_stage::IommuPtConfig;
 use spin::Once;
 
 use super::IommuError;
@@ -84,7 +84,7 @@ pub fn init() {
     // Memory Region Reporting (RMRR) structures. These regions must be mapped for the hardware or
     // firmware to function properly. For more details, see Intel(R) Virtualization Technology for
     // Directed I/O (Revision 5.0), 3.16 Handling Requests to Reserved System Memory.
-    let page_table = PageTable::<DeviceMode, PageTableEntry, PagingConsts>::empty();
+    let page_table = PageTable::<IommuPtConfig>::empty();
     for table in PciDeviceLocation::all() {
         root_table.specify_device_page_table(table, unsafe { page_table.shallow_copy() })
     }

diff --git a/ostd/src/arch/x86/iommu/dma_remapping/second_stage.rs b/ostd/src/arch/x86/iommu/dma_remapping/second_stage.rs
index 35176b5ad..b3eb9fb7e 100644
--- a/ostd/src/arch/x86/iommu/dma_remapping/second_stage.rs
+++ b/ostd/src/arch/x86/iommu/dma_remapping/second_stage.rs
@@ -7,8 +7,8 @@ use core::ops::Range;
 use crate::{
     mm::{
         page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
-        page_table::{PageTableEntryTrait, PageTableMode},
-        Paddr, PageProperty, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
+        page_table::{PageTableConfig, PageTableEntryTrait},
+        Paddr, PageProperty, PagingConstsTrait, PagingLevel, PodOnce,
     },
     util::marker::SameSizeAs,
     Pod,
@@ -17,20 +17,25 @@ use crate::{
 /// The page table used by the IOMMU maps the device address
 /// space to the physical address space.
 #[derive(Clone, Debug)]
-pub struct DeviceMode {}
+pub(crate) struct IommuPtConfig {}
 
-impl PageTableMode for DeviceMode {
-    /// The device address width we currently support is 39-bit.
-    const VADDR_RANGE: Range<Vaddr> = 0..0x80_0000_0000;
+impl PageTableConfig for IommuPtConfig {
+    /// From section 3.6 in "Intel(R) Virtualization Technology for Directed I/O",
+    /// only low canonical addresses can be used.
+    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
+
+    type E = PageTableEntry;
+    type C = PagingConsts;
 }
 
 #[derive(Clone, Debug, Default)]
-pub(super) struct PagingConsts {}
+pub(crate) struct PagingConsts {}
 
 impl PagingConstsTrait for PagingConsts {
     const BASE_PAGE_SIZE: usize = 4096;
     const NR_LEVELS: PagingLevel = 3;
     const ADDRESS_WIDTH: usize = 39;
+    const VA_SIGN_EXT: bool = true;
     const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 1;
     const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
 }

diff --git a/ostd/src/arch/x86/mm/mod.rs b/ostd/src/arch/x86/mm/mod.rs
index 9aa15c67f..cf9f24056 100644
--- a/ostd/src/arch/x86/mm/mod.rs
+++ b/ostd/src/arch/x86/mm/mod.rs
@@ -30,6 +30,7 @@ impl PagingConstsTrait for PagingConsts {
     const BASE_PAGE_SIZE: usize = 4096;
     const NR_LEVELS: PagingLevel = 4;
     const ADDRESS_WIDTH: usize = 48;
+    const VA_SIGN_EXT: bool = true;
     const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 2;
     const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
 }

diff --git a/ostd/src/lib.rs b/ostd/src/lib.rs
index a91c2b976..e1b797273 100644
--- a/ostd/src/lib.rs
+++ b/ostd/src/lib.rs
@@ -9,6 +9,7 @@
 #![feature(core_intrinsics)]
 #![feature(coroutines)]
 #![feature(fn_traits)]
+#![feature(iter_advance_by)]
 #![feature(iter_from_coroutine)]
 #![feature(let_chains)]
 #![feature(linkage)]
@@ -18,7 +19,7 @@
 #![feature(ptr_sub_ptr)]
 #![feature(sync_unsafe_cell)]
 #![feature(trait_upcasting)]
-#![feature(iter_advance_by)]
+#![feature(unbounded_shifts)]
 #![expect(internal_features)]
 #![no_std]
 #![warn(missing_docs)]

diff --git a/ostd/src/mm/kspace/mod.rs b/ostd/src/mm/kspace/mod.rs
index 25a90b77a..62f8dff88 100644
--- a/ostd/src/mm/kspace/mod.rs
+++ b/ostd/src/mm/kspace/mod.rs
@@ -51,7 +51,7 @@ use super::{
         Frame, Segment,
     },
     page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
-    page_table::{KernelMode, PageTable},
+    page_table::{PageTable, PageTableConfig},
     Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE,
 };
 use crate::{
@@ -115,12 +115,21 @@ pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool {
     !(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr))
 }
 
+#[derive(Clone, Debug)]
+pub(crate) struct KernelPtConfig {}
+
+impl PageTableConfig for KernelPtConfig {
+    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 256..512;
+
+    type E = PageTableEntry;
+    type C = PagingConsts;
+}
+
 /// The kernel page table instance.
 ///
 /// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
 /// is unlikely to be activated.
-pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingConsts>> =
-    Once::new();
+pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();
 
 /// Initializes the kernel page table.
 ///
@@ -134,7 +143,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
     info!("Initializing the kernel page table");
 
     // Start to initialize the kernel page table.
-    let kpt = PageTable::<KernelMode>::new_kernel_page_table();
+    let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
     let preempt_guard = disable_preempt();
 
     // Do linear mappings for the kernel.

diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs
index fa58c042e..c9d3b08e5 100644
--- a/ostd/src/mm/mod.rs
+++ b/ostd/src/mm/mod.rs
@@ -49,7 +49,7 @@ pub type PagingLevel = u8;
 
 /// A minimal set of constants that determines the paging system.
 /// This provides an abstraction over most paging modes in common architectures.
-pub(crate) trait PagingConstsTrait: Clone + Debug + Default + Send + Sync + 'static {
+pub(crate) trait PagingConstsTrait: Clone + Debug + Send + Sync + 'static {
     /// The smallest page size.
     /// This is also the page size at level 1 page tables.
     const BASE_PAGE_SIZE: usize;
@@ -71,6 +71,19 @@ pub(crate) trait PagingConstsTrait: Clone + Debug + Send + Sync + 'static {
     /// The address width may be BASE_PAGE_SIZE.ilog2() + NR_LEVELS * IN_FRAME_INDEX_BITS.
     /// If it is shorter than that, the higher bits in the highest level are ignored.
     const ADDRESS_WIDTH: usize;
+
+    /// Whether virtual addresses are sign-extended.
+    ///
+    /// The sign bit of a [`Vaddr`] is the bit at index [`PagingConstsTrait::ADDRESS_WIDTH`] - 1.
+    /// If this constant is `true`, bits in [`Vaddr`] that are higher than the sign bit must be
+    /// equal to the sign bit. If an address violates this rule, both the hardware and OSTD
+    /// should reject it.
+    ///
+    /// Otherwise, if this constant is `false`, higher bits must be zero.
+    ///
+    /// Regardless of sign extension, a [`Vaddr`] is always treated as unsigned in computation.
+    /// That means `0xffff_ffff_ffff_0000 < 0xffff_ffff_ffff_0001` is `true`.
+    const VA_SIGN_EXT: bool;
 }
 
 /// The page size

diff --git a/ostd/src/mm/page_table/cursor/locking.rs b/ostd/src/mm/page_table/cursor/locking.rs
index 8d3185715..5d4829173 100644
--- a/ostd/src/mm/page_table/cursor/locking.rs
+++ b/ostd/src/mm/page_table/cursor/locking.rs
@@ -11,21 +11,20 @@ use crate::{
     mm::{
         nr_subpage_per_huge, paddr_to_vaddr,
         page_table::{
-            load_pte, page_size, pte_index, Child, MapTrackingStatus, PageTable,
-            PageTableEntryTrait, PageTableGuard, PageTableMode, PageTableNodeRef,
-            PagingConstsTrait, PagingLevel,
+            load_pte, page_size, pte_index, Child, MapTrackingStatus, PageTable, PageTableConfig,
+            PageTableEntryTrait, PageTableGuard, PageTableNodeRef, PagingConstsTrait, PagingLevel,
         },
         Vaddr,
     },
     task::atomic_mode::InAtomicMode,
 };
 
-pub(super) fn lock_range<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
-    pt: &'rcu PageTable<M, E, C>,
+pub(super) fn lock_range<'rcu, C: PageTableConfig>(
+    pt: &'rcu PageTable<C>,
     guard: &'rcu dyn InAtomicMode,
     va: &Range<Vaddr>,
     new_pt_is_tracked: MapTrackingStatus,
-) -> Cursor<'rcu, M, E, C> {
+) -> Cursor<'rcu, C> {
     // The re-try loop of finding the sub-tree root.
     //
     // If we locked a stray node, we need to re-try. Otherwise, although
@@ -49,7 +48,7 @@ pub(super) fn lock_range<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: Pagi
     let mut path = core::array::from_fn(|_| None);
     path[guard_level as usize - 1] = Some(subtree_root);
 
-    Cursor::<'rcu, M, E, C> {
+    Cursor::<'rcu, C> {
         path,
         rcu_guard: guard,
         level: guard_level,
@@ -60,9 +59,7 @@ pub(super) fn lock_range<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: Pagi
     }
 }
 
-pub(super) fn unlock_range<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
-    cursor: &mut Cursor<'_, M, E, C>,
-) {
+pub(super) fn unlock_range<C: PageTableConfig>(cursor: &mut Cursor<'_, C>) {
     for i in (0..cursor.guard_level as usize - 1).rev() {
         if let Some(guard) = cursor.path[i].take() {
             let _ = ManuallyDrop::new(guard);
@@ -92,18 +89,13 @@ pub(super) fn unlock_range<C: PageTableConfig>(cursor: &mut Cursor<'_, C>) {
-fn try_traverse_and_lock_subtree_root<
-    'rcu,
-    M: PageTableMode,
-    E: PageTableEntryTrait,
-    C: PagingConstsTrait,
->(
-    pt: &PageTable<M, E, C>,
+fn try_traverse_and_lock_subtree_root<'rcu, C: PageTableConfig>(
+    pt: &PageTable<C>,
     guard: &'rcu dyn InAtomicMode,
     va: &Range<Vaddr>,
     new_pt_is_tracked: MapTrackingStatus,
-) -> Option<PageTableGuard<'rcu, E, C>> {
-    let mut cur_node_guard: Option<PageTableGuard<'rcu, E, C>> = None;
+) -> Option<PageTableGuard<'rcu, C>> {
+    let mut cur_node_guard: Option<PageTableGuard<'rcu, C>> = None;
     let mut cur_pt_addr = pt.root.start_paddr();
     for cur_level in (1..=C::NR_LEVELS).rev() {
         let start_idx = pte_index::<C>(va.start, cur_level);
@@ -115,7 +107,7 @@ fn try_traverse_and_lock_subtree_root<
             break;
         }
 
-        let cur_pt_ptr = paddr_to_vaddr(cur_pt_addr) as *mut E;
+        let cur_pt_ptr = paddr_to_vaddr(cur_pt_addr) as *mut C::E;
         // SAFETY:
         // - The page table node is alive because (1) the root node is alive and
         //   (2) all child nodes cannot be recycled because we're in the RCU critical section.
@@ -136,7 +128,7 @@ fn try_traverse_and_lock_subtree_root<
         let mut pt_guard = cur_node_guard.take().unwrap_or_else(|| {
             // SAFETY: The node must be alive for at least `'rcu` since the
             // address is read from the page table node.
-            let node_ref = unsafe { PageTableNodeRef::<'rcu, E, C>::borrow_paddr(cur_pt_addr) };
+            let node_ref = unsafe { PageTableNodeRef::<'rcu, C>::borrow_paddr(cur_pt_addr) };
            node_ref.lock(guard)
        });
        if *pt_guard.stray_mut() {
@@ -162,7 +154,7 @@ fn try_traverse_and_lock_subtree_root<
     let mut pt_guard = cur_node_guard.unwrap_or_else(|| {
         // SAFETY: The node must be alive for at least `'rcu` since the
         // address is read from the page table node.
-        let node_ref = unsafe { PageTableNodeRef::<'rcu, E, C>::borrow_paddr(cur_pt_addr) };
+        let node_ref = unsafe { PageTableNodeRef::<'rcu, C>::borrow_paddr(cur_pt_addr) };
         node_ref.lock(guard)
     });
     if *pt_guard.stray_mut() {
@@ -178,9 +170,9 @@ fn try_traverse_and_lock_subtree_root<
 /// must be within the range of the `cur_node`. The range must not be empty.
 ///
 /// The function will forget all the [`PageTableGuard`] objects in the sub-tree.
-fn dfs_acquire_lock<E: PageTableEntryTrait, C: PagingConstsTrait>(
+fn dfs_acquire_lock<C: PageTableConfig>(
     guard: &dyn InAtomicMode,
-    cur_node: &mut PageTableGuard<'_, E, C>,
+    cur_node: &mut PageTableGuard<'_, C>,
     cur_node_va: Vaddr,
     va_range: Range<Vaddr>,
 ) {
@@ -215,9 +207,9 @@ fn dfs_acquire_lock(
 ///
 /// The caller must ensure that the nodes in the specified sub-tree are locked
 /// and all guards are forgotten.
-unsafe fn dfs_release_lock<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait>(
+unsafe fn dfs_release_lock<'rcu, C: PageTableConfig>(
     guard: &'rcu dyn InAtomicMode,
-    mut cur_node: PageTableGuard<'rcu, E, C>,
+    mut cur_node: PageTableGuard<'rcu, C>,
     cur_node_va: Vaddr,
     va_range: Range<Vaddr>,
 ) {
@@ -261,9 +253,9 @@ unsafe fn dfs_release_lock<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait>(
 ///
 /// This function must not be called upon a shared node, e.g., the second-
 /// top level nodes that the kernel space and user space share.
-pub(super) unsafe fn dfs_mark_stray_and_unlock<E: PageTableEntryTrait, C: PagingConstsTrait>(
+pub(super) unsafe fn dfs_mark_stray_and_unlock<C: PageTableConfig>(
     rcu_guard: &dyn InAtomicMode,
-    mut sub_tree: PageTableGuard<E, C>,
+    mut sub_tree: PageTableGuard<C>,
 ) -> usize {
     *sub_tree.stray_mut() = true;

diff --git a/ostd/src/mm/page_table/cursor/mod.rs b/ostd/src/mm/page_table/cursor/mod.rs
index 5e2dd1f20..8c86525fd 100644
--- a/ostd/src/mm/page_table/cursor/mod.rs
+++ b/ostd/src/mm/page_table/cursor/mod.rs
@@ -34,9 +34,9 @@ use core::{any::TypeId, fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops:
 use align_ext::AlignExt;
 
 use super::{
-    page_size, pte_index, Child, Entry, KernelMode, MapTrackingStatus, PageTable,
-    PageTableEntryTrait, PageTableError, PageTableGuard, PageTableMode, PagingConstsTrait,
-    PagingLevel, UserMode,
+    is_valid_range, page_size, pte_index, Child, Entry, KernelPtConfig, MapTrackingStatus,
+    PageTable, PageTableConfig, PageTableError, PageTableGuard, PagingConstsTrait, PagingLevel,
+    UserPtConfig,
 };
 use crate::{
     mm::{
@@ -54,12 +54,12 @@ use crate::{
 /// A cursor is able to move to the next slot, to read page properties,
 /// and even to jump to a virtual address directly.
 #[derive(Debug)]
-pub struct Cursor<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub struct Cursor<'rcu, C: PageTableConfig> {
     /// The current path of the cursor.
     ///
     /// The level 1 page table lock guard is at index 0, and the level N page
     /// table lock guard is at index N - 1.
-    path: [Option<PageTableGuard<'rcu, E, C>>; MAX_NR_LEVELS],
+    path: [Option<PageTableGuard<'rcu, C>>; MAX_NR_LEVELS],
     /// The cursor should be used in a RCU read side critical section.
     rcu_guard: &'rcu dyn InAtomicMode,
     /// The level of the page table that the cursor currently points to.
@@ -72,7 +72,7 @@ pub struct Cursor<'rcu, C: PageTableConfig> {
     va: Vaddr,
     /// The virtual address range that is locked.
     barrier_va: Range<Vaddr>,
-    _phantom: PhantomData<&'rcu PageTable<M, E, C>>,
+    _phantom: PhantomData<&'rcu PageTable<C>>,
 }
 
 /// The maximum value of `PagingConstsTrait::NR_LEVELS`.
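[The shape of the unified API is easiest to see outside the diff. The following standalone sketch is illustrative only — `ExamplePtConfig` is hypothetical and not part of this patch — and shows how a single `PageTableConfig` implementor now carries everything that the `M`/`E`/`C` parameter triple used to spell out at every call site:]

    // Hypothetical config, written against the `PageTableConfig` trait
    // that this patch introduces in `ostd/src/mm/page_table/mod.rs`.
    #[derive(Clone, Debug)]
    struct ExamplePtConfig {}

    impl PageTableConfig for ExamplePtConfig {
        // Manage only the lower half of the top-level page table.
        const TOP_LEVEL_INDEX_RANGE: core::ops::Range<usize> = 0..256;

        type E = PageTableEntry; // the arch-provided PTE type
        type C = PagingConsts;   // the arch-provided paging constants
    }

    // Types that previously took three parameters now take one:
    // `PageTable<ExamplePtConfig>`, `Cursor<'_, ExamplePtConfig>`,
    // `CursorMut<'_, ExamplePtConfig>`.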
@@ -106,18 +106,18 @@ pub enum PageTableItem {
     },
 }
 
-impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Cursor<'rcu, M, E, C> {
+impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
     /// Creates a cursor claiming exclusive access over the given range.
     ///
     /// The cursor created will only be able to query or jump within the given
     /// range. Out-of-bound accesses will result in panics or errors as return values,
     /// depending on the access method.
     pub fn new(
-        pt: &'rcu PageTable<M, E, C>,
+        pt: &'rcu PageTable<C>,
         guard: &'rcu dyn InAtomicMode,
         va: &Range<Vaddr>,
     ) -> Result<Self, PageTableError> {
-        if !M::covers(va) || va.is_empty() {
+        if !is_valid_range::<C>(va) || va.is_empty() {
             return Err(PageTableError::InvalidVaddrRange(va.start, va.end));
         }
         if va.start % C::BASE_PAGE_SIZE != 0 || va.end % C::BASE_PAGE_SIZE != 0 {
@@ -125,8 +125,7 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Curso
         }
 
         const { assert!(C::NR_LEVELS as usize <= MAX_NR_LEVELS) };
-
-        let new_pt_is_tracked = if should_map_as_tracked::<M>(va.start) {
+        let new_pt_is_tracked = if should_map_as_tracked::<C>(va.start) {
             MapTrackingStatus::Tracked
         } else {
             MapTrackingStatus::Untracked
@@ -325,15 +324,15 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Curso
     }
 
     /// Goes down a level to a child page table.
-    fn push_level(&mut self, child_guard: PageTableGuard<'rcu, E, C>) {
+    fn push_level(&mut self, child_pt: PageTableGuard<'rcu, C>) {
         self.level -= 1;
-        debug_assert_eq!(self.level, child_guard.level());
+        debug_assert_eq!(self.level, child_pt.level());
 
-        let old = self.path[self.level as usize - 1].replace(child_guard);
+        let old = self.path[self.level as usize - 1].replace(child_pt);
         debug_assert!(old.is_none());
     }
 
-    fn cur_entry(&mut self) -> Entry<'_, 'rcu, E, C> {
+    fn cur_entry(&mut self) -> Entry<'_, 'rcu, C> {
         let node = self.path[self.level as usize - 1].as_mut().unwrap();
         node.entry(pte_index::<C>(self.va, self.level))
     }
@@ -346,15 +345,13 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Curso
     }
 }
 
-impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Drop for Cursor<'_, M, E, C> {
+impl<C: PageTableConfig> Drop for Cursor<'_, C> {
     fn drop(&mut self) {
         locking::unlock_range(self);
     }
 }
 
-impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
-    for Cursor<'_, M, E, C>
-{
+impl<C: PageTableConfig> Iterator for Cursor<'_, C> {
     type Item = PageTableItem;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -373,20 +370,16 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
 /// in a page table can only be accessed by one cursor, regardless of the
 /// mutability of the cursor.
 #[derive(Debug)]
-pub struct CursorMut<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
-    Cursor<'rcu, M, E, C>,
-);
+pub struct CursorMut<'rcu, C: PageTableConfig>(Cursor<'rcu, C>);
 
-impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
-    CursorMut<'rcu, M, E, C>
-{
+impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
     /// Creates a cursor claiming exclusive access over the given range.
     ///
     /// The cursor created will only be able to map, query or jump within the given
     /// range. Out-of-bound accesses will result in panics or errors as return values,
     /// depending on the access method.
     pub(super) fn new(
-        pt: &'rcu PageTable<M, E, C>,
+        pt: &'rcu PageTable<C>,
         guard: &'rcu dyn InAtomicMode,
         va: &Range<Vaddr>,
     ) -> Result<Self, PageTableError> {
@@ -452,7 +445,7 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
             || self.0.va % page_size::<C>(self.0.level) != 0
             || self.0.va + page_size::<C>(self.0.level) > end
         {
-            debug_assert!(should_map_as_tracked::<M>(self.0.va));
+            debug_assert!(should_map_as_tracked::<C>(self.0.va));
             let mut cur_entry = self.0.cur_entry();
             match cur_entry.to_ref() {
                 Child::PageTableRef(pt) => {
@@ -537,8 +530,8 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
         // We ensure not mapping in reserved kernel shared tables or releasing it.
         // Although it may be an invariant for all architectures and will be optimized
         // out by the compiler since `C::NR_LEVELS - 1 > C::HIGHEST_TRANSLATION_LEVEL`.
-        let is_kernel_shared_node =
-            TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.0.level >= C::NR_LEVELS - 1;
+        let is_kernel_shared_node = TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
+            && self.0.level >= C::NR_LEVELS - 1;
         if self.0.level > C::HIGHEST_TRANSLATION_LEVEL
             || is_kernel_shared_node
             || self.0.va % page_size::<C>(self.0.level) != 0
@@ -572,10 +565,9 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
             continue;
         }
 
-        let level = self.0.level;
-
         // Map the current page.
-        debug_assert!(!should_map_as_tracked::<M>(self.0.va));
+        debug_assert!(!should_map_as_tracked::<C>(self.0.va));
+        let level = self.0.level;
         let mut cur_entry = self.0.cur_entry();
         let _ = cur_entry.replace(Child::Untracked(pa, level, prop));
 
@@ -636,7 +628,7 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
             }
             Child::PageTable(pt) => {
                 assert!(
-                    !(TypeId::of::<M>() == TypeId::of::<KernelMode>()
+                    !(TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
                         && self.0.level == C::NR_LEVELS),
                     "Unmapping shared kernel page table nodes"
                 );
@@ -706,8 +698,8 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
     }
 }
 
-fn should_map_as_tracked<M: PageTableMode>(va: Vaddr) -> bool {
-    (TypeId::of::<M>() == TypeId::of::<KernelMode>()
-        || TypeId::of::<M>() == TypeId::of::<UserMode>())
+fn should_map_as_tracked<C: PageTableConfig>(va: Vaddr) -> bool {
+    TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
         && crate::mm::kspace::should_map_as_tracked(va)
+        || TypeId::of::<C>() == TypeId::of::<UserPtConfig>()
 }

diff --git a/ostd/src/mm/page_table/mod.rs b/ostd/src/mm/page_table/mod.rs
index dbd8f74bc..cbeb75bdb 100644
--- a/ostd/src/mm/page_table/mod.rs
+++ b/ostd/src/mm/page_table/mod.rs
@@ -3,14 +3,13 @@
 use core::{
     fmt::Debug,
     intrinsics::transmute_unchecked,
-    marker::PhantomData,
-    ops::Range,
+    ops::{Range, RangeInclusive},
     sync::atomic::{AtomicUsize, Ordering},
 };
 
 use super::{
-    nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
-    PodOnce, Vaddr,
+    kspace::KernelPtConfig, nr_subpage_per_huge, page_prop::PageProperty, page_size,
+    vm_space::UserPtConfig, Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
 };
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
@@ -39,31 +38,93 @@ pub enum PageTableError {
     UnalignedVaddr,
 }
 
-/// This is a compile-time technique to force the frame developers to distinguish
-/// between the kernel global page table instance, process specific user page table
-/// instance, and device page table instances.
-pub trait PageTableMode: Clone + Debug + 'static {
-    /// The range of virtual addresses that the page table can manage.
-    const VADDR_RANGE: Range<Vaddr>;
+/// The configurations of a page table.
+///
+/// It abstracts away both the usage and the architecture specifics from the
+/// general page table implementation. For example:
+///  - the managed virtual address range;
+///  - the trackedness of physical mappings;
+///  - the PTE layout;
+///  - the number of page table levels, etc.
+pub(crate) trait PageTableConfig: Clone + Debug + Send + Sync + 'static {
+    /// The index range at the top level (`C::NR_LEVELS`) page table.
+    ///
+    /// When configured with this value, the [`PageTable`] instance will only
+    /// be allowed to manage the virtual address range that is covered by
+    /// this range. The range can be smaller than the actual allowed range
+    /// specified by the hardware MMU (limited by `C::ADDRESS_WIDTH`).
+    const TOP_LEVEL_INDEX_RANGE: Range<usize>;
 
-    /// Check if the given range is covered by the valid virtual address range.
-    fn covers(r: &Range<Vaddr>) -> bool {
-        Self::VADDR_RANGE.start <= r.start && r.end <= Self::VADDR_RANGE.end
+    type E: PageTableEntryTrait;
+    type C: PagingConstsTrait;
+}
+
+// Implement it so that we can comfortably use low level functions
+// like `page_size::<C>` without typing `C::C` everywhere.
+impl<C: PageTableConfig> PagingConstsTrait for C {
+    const BASE_PAGE_SIZE: usize = C::C::BASE_PAGE_SIZE;
+    const NR_LEVELS: PagingLevel = C::C::NR_LEVELS;
+    const HIGHEST_TRANSLATION_LEVEL: PagingLevel = C::C::HIGHEST_TRANSLATION_LEVEL;
+    const PTE_SIZE: usize = C::C::PTE_SIZE;
+    const ADDRESS_WIDTH: usize = C::C::ADDRESS_WIDTH;
+    const VA_SIGN_EXT: bool = C::C::VA_SIGN_EXT;
+}
+
+/// Gets the managed virtual addresses range for the page table.
+///
+/// It returns a [`RangeInclusive`] because the end address, if being
+/// [`Vaddr::MAX`], overflows [`Range`].
+const fn vaddr_range<C: PageTableConfig>() -> RangeInclusive<Vaddr> {
+    const fn top_level_index_width<C: PageTableConfig>() -> usize {
+        C::ADDRESS_WIDTH - pte_index_bit_offset::<C>(C::NR_LEVELS)
+    }
+
+    const {
+        assert!(C::TOP_LEVEL_INDEX_RANGE.start < C::TOP_LEVEL_INDEX_RANGE.end);
+        assert!(top_level_index_width::<C>() <= nr_pte_index_bits::<C>());
+        assert!(C::TOP_LEVEL_INDEX_RANGE.start < 1 << top_level_index_width::<C>());
+        assert!(C::TOP_LEVEL_INDEX_RANGE.end <= 1 << top_level_index_width::<C>());
+    };
+
+    const fn pt_va_range_start<C: PageTableConfig>() -> Vaddr {
+        C::TOP_LEVEL_INDEX_RANGE.start << pte_index_bit_offset::<C>(C::NR_LEVELS)
+    }
+
+    const fn pt_va_range_end<C: PageTableConfig>() -> Vaddr {
+        C::TOP_LEVEL_INDEX_RANGE
+            .end
+            .unbounded_shl(pte_index_bit_offset::<C>(C::NR_LEVELS) as u32)
+            .wrapping_sub(1) // Inclusive end.
+    }
+
+    const fn sign_bit_of_va<C: PageTableConfig>(va: Vaddr) -> bool {
+        (va >> (C::ADDRESS_WIDTH - 1)) & 1 != 0
+    }
+
+    let mut start = pt_va_range_start::<C>();
+    let mut end = pt_va_range_end::<C>();
+
+    if C::VA_SIGN_EXT {
+        const {
+            assert!(
+                sign_bit_of_va::<C>(pt_va_range_start::<C>())
+                    == sign_bit_of_va::<C>(pt_va_range_end::<C>())
+            )
+        }
+
+        if sign_bit_of_va::<C>(pt_va_range_start::<C>()) {
+            start |= !0 ^ ((1 << C::ADDRESS_WIDTH) - 1);
+            end |= !0 ^ ((1 << C::ADDRESS_WIDTH) - 1);
+        }
+    }
+
+    start..=end
 }
 
-#[derive(Clone, Debug)]
-pub struct UserMode {}
-
-impl PageTableMode for UserMode {
-    const VADDR_RANGE: Range<Vaddr> = 0..super::MAX_USERSPACE_VADDR;
-}
-
-#[derive(Clone, Debug)]
-pub struct KernelMode {}
-
-impl PageTableMode for KernelMode {
-    const VADDR_RANGE: Range<Vaddr> = super::KERNEL_VADDR_RANGE;
+/// Check if the given range is covered by the valid range of the page table.
+const fn is_valid_range<C: PageTableConfig>(r: &Range<Vaddr>) -> bool {
+    let va_range = vaddr_range::<C>();
+    *va_range.start() <= r.start && (r.end == 0 || r.end - 1 <= *va_range.end())
 }
 
 // Here are some const values that are determined by the paging constants.
 
@@ -75,25 +136,28 @@ const fn nr_pte_index_bits<C: PagingConstsTrait>() -> usize {
 
 /// The index of a VA's PTE in a page table node at the given level.
 const fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: PagingLevel) -> usize {
-    (va >> (C::BASE_PAGE_SIZE.ilog2() as usize + nr_pte_index_bits::<C>() * (level as usize - 1)))
-        & (nr_subpage_per_huge::<C>() - 1)
+    (va >> pte_index_bit_offset::<C>(level)) & (nr_subpage_per_huge::<C>() - 1)
+}
+
+/// The bit offset of the entry offset part in a virtual address.
+///
+/// This function returns the bit offset of the least significant bit. Take
+/// x86-64 as an example, the `pte_index_bit_offset(2)` should return 21, which
+/// is 12 (the 4KiB in-page offset) plus 9 (index width in the level-1 table).
+const fn pte_index_bit_offset<C: PagingConstsTrait>(level: PagingLevel) -> usize {
+    C::BASE_PAGE_SIZE.ilog2() as usize + nr_pte_index_bits::<C>() * (level as usize - 1)
 }
 
 /// A handle to a page table.
 /// A page table can track the lifetime of the mapped physical pages.
 #[derive(Debug)]
-pub struct PageTable<
-    M: PageTableMode,
-    E: PageTableEntryTrait = PageTableEntry,
-    C: PagingConstsTrait = PagingConsts,
-> {
-    root: PageTableNode<E, C>,
-    _phantom: PhantomData<M>,
+pub struct PageTable<C: PageTableConfig> {
+    root: PageTableNode<C>,
 }
 
-impl PageTable<UserMode> {
+impl PageTable<UserPtConfig> {
     pub fn activate(&self) {
-        // SAFETY: The usermode page table is safe to activate since the kernel
+        // SAFETY: The user mode page table is safe to activate since the kernel
         // mappings are shared.
         unsafe {
             self.root.activate();
@@ -101,7 +165,7 @@ impl PageTable<UserMode> {
     }
 }
 
-impl PageTable<KernelMode, PageTableEntry, PagingConsts> {
+impl PageTable<KernelPtConfig> {
     /// Create a new kernel page table.
     pub(crate) fn new_kernel_page_table() -> Self {
         let kpt = Self::empty();
@@ -111,10 +175,7 @@ impl PageTable<KernelMode, PageTableEntry, PagingConsts> {
         let preempt_guard = disable_preempt();
         let mut root_node = kpt.root.borrow().lock(&preempt_guard);
 
-        const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
-        let kernel_space_range = NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE;
-
-        for i in kernel_space_range {
+        for i in KernelPtConfig::TOP_LEVEL_INDEX_RANGE {
             let mut root_entry = root_node.entry(i);
             let is_tracked = if super::kspace::should_map_as_tracked(
                 i * page_size::<PagingConsts>(PagingConsts::NR_LEVELS - 1),
@@ -136,7 +197,7 @@ impl PageTable<KernelMode, PageTableEntry, PagingConsts> {
     ///
     /// This should be the only way to create the user page table, that is to
     /// duplicate the kernel page table with all the kernel mappings shared.
-    pub fn create_user_page_table(&self) -> PageTable<UserMode> {
+    pub(in crate::mm) fn create_user_page_table(&'static self) -> PageTable<UserPtConfig> {
         let new_root =
             PageTableNode::alloc(PagingConsts::NR_LEVELS, MapTrackingStatus::NotApplicable);
 
@@ -144,28 +205,25 @@ impl PageTable<KernelMode, PageTableEntry, PagingConsts> {
         let mut root_node = self.root.borrow().lock(&preempt_guard);
         let mut new_node = new_root.borrow().lock(&preempt_guard);
 
-        // Make a shallow copy of the root node in the kernel space range.
-        // The user space range is not copied.
-        const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
-
-        for i in NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE {
+        for i in KernelPtConfig::TOP_LEVEL_INDEX_RANGE {
             let root_entry = root_node.entry(i);
             let child = root_entry.to_ref();
             let Child::PageTableRef(pt) = child else {
                 panic!("The kernel page table doesn't contain shared nodes");
             };
-            let pt_cloned = pt.clone();
 
-            let _ = new_node
-                .entry(i)
-                .replace(Child::PageTable(crate::sync::RcuDrop::new(pt_cloned)));
+            // We do not add additional reference count specifically for the
+            // shared kernel page tables. It requires user page tables to
+            // outlive the kernel page table, which is trivially true.
+            // See also `<PageTablePageMeta as AnyFrameMeta>::on_drop`.
+            let pt_addr = pt.start_paddr();
+            let pte = PageTableEntry::new_pt(pt_addr);
+            // SAFETY: The index is within the bounds and the new PTE is compatible.
+            unsafe { new_node.write_pte(i, pte) };
         }
         drop(new_node);
 
-        PageTable::<UserMode> {
-            root: new_root,
-            _phantom: PhantomData,
-        }
+        PageTable::<UserPtConfig> { root: new_root }
     }
 
     /// Protect the given virtual address range in the kernel page table.
@@ -193,14 +251,13 @@ impl PageTable<KernelMode, PageTableEntry, PagingConsts> {
     }
 }
 
-impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C> {
+impl<C: PageTableConfig> PageTable<C> {
     /// Create a new empty page table.
     ///
     /// Useful for the IOMMU page tables only.
     pub fn empty() -> Self {
         PageTable {
-            root: PageTableNode::<E, C>::alloc(C::NR_LEVELS, MapTrackingStatus::NotApplicable),
-            _phantom: PhantomData,
+            root: PageTableNode::<C>::alloc(C::NR_LEVELS, MapTrackingStatus::NotApplicable),
         }
     }
 
@@ -239,7 +296,7 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M,
     pub fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
         // SAFETY: The root node is a valid page table node so the address is valid.
-        unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
+        unsafe { page_walk::<C>(self.root_paddr(), vaddr) }
     }
 
     /// Create a new cursor exclusively accessing the virtual address range for mapping.
@@ -250,7 +307,7 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M,
         guard: &'a G,
         va: &Range<Vaddr>,
-    ) -> Result<CursorMut<'a, M, E, C>, PageTableError> {
+    ) -> Result<CursorMut<'a, C>, PageTableError> {
         CursorMut::new(self, guard.as_atomic_mode_guard(), va)
     }
 
     /// Create a new cursor exclusively accessing the virtual address range for querying.
@@ -263,7 +320,7 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M,
         guard: &'a G,
         va: &Range<Vaddr>,
-    ) -> Result<Cursor<'a, M, E, C>, PageTableError> {
+    ) -> Result<Cursor<'a, C>, PageTableError> {
         Cursor::new(self, guard.as_atomic_mode_guard(), va)
     }
 
@@ -273,7 +330,6 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M,
     pub(super) unsafe fn shallow_copy(&self) -> Self {
         PageTable {
             root: self.root.clone(),
-            _phantom: PhantomData,
         }
     }
 }
@@ -297,7 +353,7 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M,
-pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
+pub(super) unsafe fn page_walk<C: PageTableConfig>(
     root_paddr: Paddr,
     vaddr: Vaddr,
 ) -> Option<(Paddr, PageProperty)> {
@@ -310,7 +366,7 @@ pub(super) unsafe fn page_walk<C: PageTableConfig>(
         let node_addr = paddr_to_vaddr(root_paddr);
         let offset = pte_index::<C>(vaddr, cur_level);
         // SAFETY: The offset does not exceed the value of PAGE_SIZE.
-        unsafe { (node_addr as *const E).add(offset).read() }
+        unsafe { (node_addr as *const C::E).add(offset).read() }
     };
 
     while cur_level > 1 {
@@ -328,7 +384,7 @@ pub(super) unsafe fn page_walk<C: PageTableConfig>(
         let node_addr = paddr_to_vaddr(cur_pte.paddr());
         let offset = pte_index::<C>(vaddr, cur_level);
         // SAFETY: The offset does not exceed the value of PAGE_SIZE.
-        unsafe { (node_addr as *const E).add(offset).read() }
+        unsafe { (node_addr as *const C::E).add(offset).read() }
     };
 }

diff --git a/ostd/src/mm/page_table/node/child.rs b/ostd/src/mm/page_table/node/child.rs
index fc3e918df..64fdc6969 100644
--- a/ostd/src/mm/page_table/node/child.rs
+++ b/ostd/src/mm/page_table/node/child.rs
@@ -4,12 +4,13 @@
 
 use core::{mem::ManuallyDrop, panic};
 
-use super::{MapTrackingStatus, PageTableEntryTrait, PageTableNode, PageTableNodeRef};
+use super::{MapTrackingStatus, PageTableEntryTrait, PageTableNode};
 use crate::{
     mm::{
         frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
         page_prop::PageProperty,
-        Paddr, PagingConstsTrait, PagingLevel,
+        page_table::{PageTableConfig, PageTableNodeRef},
+        Paddr, PagingLevel,
     },
     sync::RcuDrop,
 };
 
 /// A child of a page table node.
 // TODO: Distinguish between the reference and the owning child.
 #[derive(Debug)]
-pub(in crate::mm) enum Child<'a, E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub(in crate::mm) enum Child<'a, C: PageTableConfig> {
     /// An owning handle to a raw page table node.
-    PageTable(RcuDrop<PageTableNode<E, C>>),
+    PageTable(RcuDrop<PageTableNode<C>>),
     /// A reference of a child page table node.
-    PageTableRef(PageTableNodeRef<'a, E, C>),
+    PageTableRef(PageTableNodeRef<'a, C>),
     /// A mapped frame.
     Frame(Frame<dyn AnyFrameMeta>, PageProperty),
     /// Mapped frames that are not tracked by handles.
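[A worked example may help with the `vaddr_range` computation introduced in `ostd/src/mm/page_table/mod.rs` above. The helper below is a simplified standalone re-derivation for 4-level, 48-bit, sign-extended paging — the x86-64 parameters used by `UserPtConfig` and `KernelPtConfig` in this patch. It is a sketch for illustration, not the patch's exact code:]

    /// Maps a top-level index range to an inclusive VA range, assuming
    /// BASE_PAGE_SIZE = 4096, 9 index bits per level, 4 levels,
    /// ADDRESS_WIDTH = 48, and VA_SIGN_EXT = true.
    const fn va_range_48(first_idx: usize, end_idx: usize) -> (usize, usize) {
        const SHIFT: u32 = 12 + 9 * 3; // bit offset of the top-level index: 39
        let mut start = first_idx << SHIFT;
        let mut end = (end_idx << SHIFT) - 1; // inclusive end
        // Sign-extend when bit 47 of the start is set, as `vaddr_range` does.
        if (start >> 47) & 1 != 0 {
            let ext = !0usize ^ ((1usize << 48) - 1); // 0xffff_0000_0000_0000
            start |= ext;
            end |= ext;
        }
        (start, end)
    }

    // Indices 0..256 (user half):     0x0 ..= 0x0000_7fff_ffff_ffff
    // Indices 256..512 (kernel half): 0xffff_8000_0000_0000 ..= 0xffff_ffff_ffff_ffff

[So `KernelPtConfig`'s `TOP_LEVEL_INDEX_RANGE` of `256..512` reproduces the canonical higher half that `KernelMode::VADDR_RANGE` used to state directly.]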
@@ -29,7 +30,7 @@ pub(in crate::mm) enum Child<'a, C: PageTableConfig> {
     Untracked(Paddr, PagingLevel, PageProperty),
     None,
 }
 
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<'_, E, C> {
+impl<C: PageTableConfig> Child<'_, C> {
     /// Returns whether the child does not map to anything.
     pub(in crate::mm) fn is_none(&self) -> bool {
         matches!(self, Child::None)
@@ -66,21 +67,21 @@ impl<C: PageTableConfig> Child<'_, C> {
     /// Usually this is for recording the PTE into a page table node. When the
     /// child is needed again by reading the PTE of a page table node, extra
     /// information should be provided using the [`Child::from_pte`] method.
-    pub(super) fn into_pte(self) -> E {
+    pub(super) fn into_pte(self) -> C::E {
         match self {
             Child::PageTable(pt) => {
                 let pt = ManuallyDrop::new(pt);
-                E::new_pt(pt.start_paddr())
+                C::E::new_pt(pt.start_paddr())
             }
             Child::PageTableRef(_) => {
                 panic!("`PageTableRef` should not be converted to PTE");
             }
             Child::Frame(page, prop) => {
                 let level = page.map_level();
-                E::new_page(page.into_raw(), level, prop)
+                C::E::new_page(page.into_raw(), level, prop)
             }
-            Child::Untracked(pa, level, prop) => E::new_page(pa, level, prop),
-            Child::None => E::new_absent(),
+            Child::Untracked(pa, level, prop) => C::E::new_page(pa, level, prop),
+            Child::None => C::E::new_absent(),
         }
     }
 
@@ -97,7 +98,7 @@ impl<C: PageTableConfig> Child<'_, C> {
     /// This method should be only used no more than once for a PTE that has
     /// been converted from a child using the [`Child::into_pte`] method.
     pub(super) unsafe fn from_pte(
-        pte: E,
+        pte: C::E,
         level: PagingLevel,
         is_tracked: MapTrackingStatus,
     ) -> Self {
@@ -141,7 +142,7 @@ impl<C: PageTableConfig> Child<'_, C> {
     /// This method must not be used with a PTE that has been restored to a
     /// child using the [`Child::from_pte`] method.
     pub(super) unsafe fn ref_from_pte(
-        pte: &E,
+        pte: &C::E,
         level: PagingLevel,
         is_tracked: MapTrackingStatus,
     ) -> Self {

diff --git a/ostd/src/mm/page_table/node/entry.rs b/ostd/src/mm/page_table/node/entry.rs
index 48b1b834c..9ed9db418 100644
--- a/ostd/src/mm/page_table/node/entry.rs
+++ b/ostd/src/mm/page_table/node/entry.rs
@@ -4,11 +4,14 @@
 
 use core::mem::ManuallyDrop;
 
-use super::{
-    Child, MapTrackingStatus, PageTableEntryTrait, PageTableGuard, PageTableNode, PageTableNodeRef,
-};
+use super::{Child, MapTrackingStatus, PageTableEntryTrait, PageTableGuard, PageTableNode};
 use crate::{
-    mm::{nr_subpage_per_huge, page_prop::PageProperty, page_size, PagingConstsTrait},
+    mm::{
+        nr_subpage_per_huge,
+        page_prop::PageProperty,
+        page_size,
+        page_table::{PageTableConfig, PageTableNodeRef},
+    },
     sync::RcuDrop,
     task::atomic_mode::InAtomicMode,
 };
@@ -20,7 +23,7 @@
 /// This is a static reference to an entry in a node that does not account for
 /// a dynamic reference count to the child. It can be used to create an owned
 /// handle, which is a [`Child`].
-pub(in crate::mm) struct Entry<'a, 'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub(in crate::mm) struct Entry<'a, 'rcu, C: PageTableConfig> {
     /// The page table entry.
     ///
     /// We store the page table entry here to optimize the number of reads from
@@ -28,14 +31,14 @@ pub(in crate::mm) struct Entry<'a, 'rcu, C: PageTableConfig> {
     /// other CPUs may modify the memory location for accessed/dirty bits. Such
     /// accesses will violate the aliasing rules of Rust and cause undefined
     /// behaviors.
-    pte: E,
+    pte: C::E,
     /// The index of the entry in the node.
     idx: usize,
     /// The node that contains the entry.
-    node: &'a mut PageTableGuard<'rcu, E, C>,
+    node: &'a mut PageTableGuard<'rcu, C>,
 }
 
-impl<'a, 'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> Entry<'a, 'rcu, E, C> {
+impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
     /// Returns if the entry does not map to anything.
     pub(in crate::mm) fn is_none(&self) -> bool {
         !self.pte.is_present()
@@ -47,7 +50,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
     }
 
     /// Gets a reference to the child.
-    pub(in crate::mm) fn to_ref(&self) -> Child<'rcu, E, C> {
+    pub(in crate::mm) fn to_ref(&self) -> Child<'rcu, C> {
         // SAFETY: The entry structure represents an existent entry with the
         // right node information.
         unsafe { Child::ref_from_pte(&self.pte, self.node.level(), self.node.is_tracked()) }
@@ -87,7 +90,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
     ///
     /// The method panics if the given child is not compatible with the node.
     /// The compatibility is specified by the [`Child::is_compatible`].
-    pub(in crate::mm) fn replace(&mut self, new_child: Child<E, C>) -> Child<E, C> {
+    pub(in crate::mm) fn replace(&mut self, new_child: Child<'rcu, C>) -> Child<'rcu, C> {
         assert!(new_child.is_compatible(self.node.level(), self.node.is_tracked()));
 
         // SAFETY: The entry structure represents an existent entry with the
@@ -122,13 +125,13 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
         &mut self,
         guard: &'rcu dyn InAtomicMode,
         new_pt_is_tracked: MapTrackingStatus,
-    ) -> Option<PageTableGuard<'rcu, E, C>> {
+    ) -> Option<PageTableGuard<'rcu, C>> {
         if !(self.is_none() && self.node.level() > 1) {
             return None;
         }
 
         let level = self.node.level();
-        let new_page = PageTableNode::<E, C>::alloc(level - 1, new_pt_is_tracked);
+        let new_page = PageTableNode::<C>::alloc(level - 1, new_pt_is_tracked);
 
         let paddr = new_page.start_paddr();
         let _ = ManuallyDrop::new(new_page.borrow().lock(guard));
@@ -163,7 +166,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
     pub(in crate::mm::page_table) fn split_if_untracked_huge(
         &mut self,
         guard: &'rcu dyn InAtomicMode,
-    ) -> Option<PageTableGuard<'rcu, E, C>> {
+    ) -> Option<PageTableGuard<'rcu, C>> {
         let level = self.node.level();
 
         if !(self.pte.is_last(level)
@@ -176,7 +179,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
 
         let pa = self.pte.paddr();
         let prop = self.pte.prop();
-        let new_page = PageTableNode::<E, C>::alloc(level - 1, MapTrackingStatus::Untracked);
+        let new_page = PageTableNode::<C>::alloc(level - 1, MapTrackingStatus::Untracked);
 
         let mut pt_lock_guard = new_page.borrow().lock(guard);
 
         for i in 0..nr_subpage_per_huge::<C>() {
@@ -211,7 +214,7 @@ impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
     /// # Safety
     ///
     /// The caller must ensure that the index is within the bounds of the node.
-    pub(super) unsafe fn new_at(guard: &'a mut PageTableGuard<'rcu, E, C>, idx: usize) -> Self {
+    pub(super) unsafe fn new_at(guard: &'a mut PageTableGuard<'rcu, C>, idx: usize) -> Self {
         // SAFETY: The index is within the bound.
         let pte = unsafe { guard.read_pte(idx) };
         Self {

diff --git a/ostd/src/mm/page_table/node/mod.rs b/ostd/src/mm/page_table/node/mod.rs
index d691f3b8d..5d48954c5 100644
--- a/ostd/src/mm/page_table/node/mod.rs
+++ b/ostd/src/mm/page_table/node/mod.rs
@@ -36,7 +36,7 @@ use core::{
 };
 
 pub(in crate::mm) use self::{child::Child, entry::Entry};
-use super::{nr_subpage_per_huge, PageTableEntryTrait};
+use super::{nr_subpage_per_huge, PageTableConfig, PageTableEntryTrait};
 use crate::{
     mm::{
         frame::{meta::AnyFrameMeta, Frame, FrameRef},
@@ -56,9 +56,9 @@ use crate::{
 ///
 /// [`PageTableNode`] is read-only. To modify the page table node, lock and use
 /// [`PageTableGuard`].
-pub(super) type PageTableNode<E, C> = Frame<PageTablePageMeta<E, C>>;
+pub(super) type PageTableNode<C> = Frame<PageTablePageMeta<C>>;
 
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C> {
+impl<C: PageTableConfig> PageTableNode<C> {
     pub(super) fn level(&self) -> PagingLevel {
         self.meta().level
     }
@@ -75,7 +75,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C> {
             .alloc_frame_with(meta)
             .expect("Failed to allocate a page table node");
         // The allocated frame is zeroed. Make sure zero is absent PTE.
-        debug_assert!(E::new_absent().as_bytes().iter().all(|&b| b == 0));
+        debug_assert_eq!(C::E::new_absent().as_usize(), 0);
 
         frame
     }
@@ -130,15 +130,15 @@ impl<C: PageTableConfig> PageTableNode<C> {
 }
 
 /// A reference to a page table node.
-pub(super) type PageTableNodeRef<'a, E, C> = FrameRef<'a, PageTablePageMeta<E, C>>;
+pub(super) type PageTableNodeRef<'a, C> = FrameRef<'a, PageTablePageMeta<C>>;
 
-impl<'a, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNodeRef<'a, E, C> {
+impl<'a, C: PageTableConfig> PageTableNodeRef<'a, C> {
     /// Locks the page table node.
     ///
     /// An atomic mode guard is required to
     /// 1. prevent deadlocks;
     /// 2. provide a lifetime (`'rcu`) that the nodes are guaranteed to outlive.
-    pub(super) fn lock<'rcu>(self, _guard: &'rcu dyn InAtomicMode) -> PageTableGuard<'rcu, E, C>
+    pub(super) fn lock<'rcu>(self, _guard: &'rcu dyn InAtomicMode) -> PageTableGuard<'rcu, C>
     where
         'a: 'rcu,
     {
@@ -151,7 +151,7 @@ impl<'a, C: PageTableConfig> PageTableNodeRef<'a, C> {
             core::hint::spin_loop();
         }
 
-        PageTableGuard::<'rcu, E, C> { inner: self }
+        PageTableGuard::<'rcu, C> { inner: self }
     }
 
     /// Creates a new [`PageTableGuard`] without checking if the page table lock is held.
@@ -165,7 +165,7 @@ impl<'a, C: PageTableConfig> PageTableNodeRef<'a, C> {
     pub(super) unsafe fn make_guard_unchecked<'rcu>(
         self,
         _guard: &'rcu dyn InAtomicMode,
-    ) -> PageTableGuard<'rcu, E, C>
+    ) -> PageTableGuard<'rcu, C>
     where
         'a: 'rcu,
     {
@@ -175,18 +175,18 @@ impl<'a, C: PageTableConfig> PageTableNodeRef<'a, C> {
 
 /// A guard that holds the lock of a page table node.
 #[derive(Debug)]
-pub(super) struct PageTableGuard<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> {
-    inner: PageTableNodeRef<'rcu, E, C>,
+pub(super) struct PageTableGuard<'rcu, C: PageTableConfig> {
+    inner: PageTableNodeRef<'rcu, C>,
 }
 
-impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E, C> {
+impl<'rcu, C: PageTableConfig> PageTableGuard<'rcu, C> {
     /// Borrows an entry in the node at a given index.
     ///
     /// # Panics
     ///
     /// Panics if the index is not within the bound of
     /// [`nr_subpage_per_huge`].
-    pub(super) fn entry(&mut self, idx: usize) -> Entry<'_, 'rcu, E, C> {
+    pub(super) fn entry(&mut self, idx: usize) -> Entry<'_, 'rcu, C> {
         assert!(idx < nr_subpage_per_huge::<C>());
         // SAFETY: The index is within the bound.
         unsafe { Entry::new_at(self, idx) }
     }
@@ -213,9 +213,9 @@ impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E,
     /// # Safety
     ///
     /// The caller must ensure that the index is within the bound.
-    unsafe fn read_pte(&self, idx: usize) -> E {
+    pub(super) unsafe fn read_pte(&self, idx: usize) -> C::E {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.start_paddr()) as *mut E;
+        let ptr = paddr_to_vaddr(self.start_paddr()) as *mut C::E;
         // SAFETY:
         // - The page table node is alive. The index is inside the bound, so the page table entry is valid.
         // - All page table entries are aligned and accessed with atomic operations only.
@@ -235,9 +235,9 @@ impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E,
     /// 1. The index must be within the bound;
     /// 2. The PTE must represent a child compatible with this page table node
     ///    (see [`Child::is_compatible`]).
-    unsafe fn write_pte(&mut self, idx: usize, pte: E) {
+    pub(super) unsafe fn write_pte(&mut self, idx: usize, pte: C::E) {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.start_paddr()) as *mut E;
+        let ptr = paddr_to_vaddr(self.start_paddr()) as *mut C::E;
         // SAFETY:
         // - The page table node is alive. The index is inside the bound, so the page table entry is valid.
         // - All page table entries are aligned and accessed with atomic operations only.
@@ -251,15 +251,15 @@ impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E,
     }
 }
 
-impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> Deref for PageTableGuard<'rcu, E, C> {
-    type Target = PageTableNodeRef<'rcu, E, C>;
+impl<'rcu, C: PageTableConfig> Deref for PageTableGuard<'rcu, C> {
+    type Target = PageTableNodeRef<'rcu, C>;
 
     fn deref(&self) -> &Self::Target {
         &self.inner
     }
 }
 
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableGuard<'_, E, C> {
+impl<C: PageTableConfig> Drop for PageTableGuard<'_, C> {
     fn drop(&mut self) {
         self.inner.meta().lock.store(0, Ordering::Release);
     }
@@ -268,7 +268,7 @@ impl<C: PageTableConfig> Drop for PageTableGuard<'_, C> {
 /// The metadata of any kinds of page table pages.
 /// Make sure the generic parameters don't affect the memory layout.
 #[derive(Debug)]
-pub(in crate::mm) struct PageTablePageMeta<E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub(in crate::mm) struct PageTablePageMeta<C: PageTableConfig> {
     /// The number of valid PTEs. It is mutable if the lock is held.
     pub nr_children: SyncUnsafeCell<u16>,
     /// If the page table is detached from its parent.
@@ -284,7 +284,7 @@ pub(in crate::mm) struct PageTablePageMeta<E: PageTableEntryTrait, C: PagingCons
-    _phantom: core::marker::PhantomData<(E, C)>,
+    _phantom: core::marker::PhantomData<C>,
 }
 
 /// Describe if the physical address recorded in this page table refers to a
@@ -303,7 +303,7 @@ pub(in crate::mm) enum MapTrackingStatus {
     Tracked,
 }
 
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTablePageMeta<E, C> {
+impl<C: PageTableConfig> PageTablePageMeta<C> {
     pub fn new(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
         Self {
             nr_children: SyncUnsafeCell::new(0),
@@ -318,7 +318,7 @@ impl<C: PageTableConfig> PageTablePageMeta<C> {
 
 // SAFETY: We can read the page table node because the page table pages are
 // accessed as untyped memory.
-unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> AnyFrameMeta for PageTablePageMeta<E, C> {
+unsafe impl<C: PageTableConfig> AnyFrameMeta for PageTablePageMeta<C> {
     fn on_drop(&mut self, reader: &mut VmReader) {
         let nr_children = self.nr_children.get_mut();
 
@@ -330,13 +330,20 @@ unsafe impl<C: PageTableConfig> AnyFrameMeta for PageTablePageMeta<C> {
         let is_tracked = self.is_tracked;
 
         // Drop the children.
-        while let Ok(pte) = reader.read_once::<E>() {
-            // Here if we use directly `Child::from_pte` we would experience a
-            // 50% increase in the overhead of the `drop` function. It seems that
-            // Rust is very conservative about inlining and optimizing dead code
-            // for `unsafe` code. So we manually inline the function here.
+        let range = if level == C::NR_LEVELS {
+            C::TOP_LEVEL_INDEX_RANGE.clone()
+        } else {
+            0..nr_subpage_per_huge::<C>()
+        };
+        reader.skip(range.start * size_of::<C::E>());
+        for _ in range {
+            // Non-atomic read is OK because we have mutable access.
+            let pte = reader.read_once::<C::E>().unwrap();
             if pte.is_present() {
                 let paddr = pte.paddr();
+                // As a fast path, we can ensure that the type of the child frame
+                // is `Self` if the PTE points to a child page table. Then we don't
+                // need to check the vtable for the drop method.
                 if !pte.is_last(level) {
                     // SAFETY: The PTE points to a page table node. The ownership
                     // of the child is transferred to the child then dropped.

diff --git a/ostd/src/mm/page_table/test.rs b/ostd/src/mm/page_table/test.rs
index 5e27c843b..81584aa0b 100644
--- a/ostd/src/mm/page_table/test.rs
+++ b/ostd/src/mm/page_table/test.rs
@@ -3,7 +3,7 @@
 use super::*;
 use crate::{
     mm::{
-        kspace::LINEAR_MAPPING_BASE_VADDR,
+        kspace::{KernelPtConfig, LINEAR_MAPPING_BASE_VADDR},
         page_prop::{CachePolicy, PageFlags},
         FrameAllocOptions, MAX_USERSPACE_VADDR, PAGE_SIZE,
     },
@@ -17,14 +17,14 @@ mod test_utils {
 
     /// Sets up an empty `PageTable` in the specified mode.
     #[track_caller]
-    pub fn setup_page_table<M: PageTableMode>() -> PageTable<M> {
-        PageTable::<M>::empty()
+    pub fn setup_page_table<C: PageTableConfig>() -> PageTable<C> {
+        PageTable::<C>::empty()
     }
 
     /// Maps a range of virtual addresses to physical addresses with specified properties.
     #[track_caller]
-    pub fn map_range<M: PageTableMode>(
-        page_table: &PageTable<M>,
+    pub fn map_range<C: PageTableConfig>(
+        page_table: &PageTable<C>,
         virtual_range: Range<Vaddr>,
         physical_range: Range<Paddr>,
         page_property: PageProperty,
@@ -38,7 +38,7 @@ mod test_utils {
 
     /// Unmaps a range of virtual addresses.
     #[track_caller]
-    pub fn unmap_range<M: PageTableMode>(page_table: &PageTable<M>, range: Range<Vaddr>) {
+    pub fn unmap_range<C: PageTableConfig>(page_table: &PageTable<C>, range: Range<Vaddr>) {
         let preempt_guard = disable_preempt();
         unsafe {
             page_table
@@ -105,13 +105,24 @@ mod test_utils {
         const NR_LEVELS: PagingLevel = 4;
         const BASE_PAGE_SIZE: usize = PAGE_SIZE;
         const ADDRESS_WIDTH: usize = 48;
+        const VA_SIGN_EXT: bool = true;
         const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 3;
         const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
     }
 
+    #[derive(Clone, Debug)]
+    pub struct TestPtConfig;
+
+    impl PageTableConfig for TestPtConfig {
+        const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
+
+        type E = PageTableEntry;
+        type C = VeryHugePagingConsts;
+    }
+
     /// Applies a protection operation to a range of virtual addresses within a PageTable.
-    pub fn protect_range<M: PageTableMode>(
-        page_table: &PageTable<M>,
+    pub fn protect_range<C: PageTableConfig>(
+        page_table: &PageTable<C>,
         range: &Range<Vaddr>,
         mut protect_op: impl FnMut(&mut PageProperty),
     ) {
@@ -135,15 +146,16 @@ mod create_page_table {
 
     #[ktest]
     fn init_user_page_table() {
-        let user_pt = setup_page_table::<UserMode>();
+        let user_pt = setup_page_table::<UserPtConfig>();
+        let preempt_guard = disable_preempt();
         assert!(user_pt
-            .cursor(&disable_preempt(), &(0..MAX_USERSPACE_VADDR))
+            .cursor(&preempt_guard, &(0..MAX_USERSPACE_VADDR))
            .is_ok());
     }
 
     #[ktest]
     fn init_kernel_page_table() {
-        let kernel_pt = setup_page_table::<KernelMode>();
+        let kernel_pt = setup_page_table::<KernelPtConfig>();
         assert!(kernel_pt
             .cursor(
                 &disable_preempt(),
@@ -154,7 +166,13 @@ mod create_page_table {
 
     #[ktest]
     fn create_user_page_table() {
-        let kernel_pt = PageTable::<KernelMode>::new_kernel_page_table();
+        use spin::Once;
+
+        // To make kernel PT `'static`, required for `create_user_page_table`.
+        static MOCK_KERNEL_PT: Once<PageTable<KernelPtConfig>> = Once::new();
+        MOCK_KERNEL_PT.call_once(PageTable::<KernelPtConfig>::new_kernel_page_table);
+        let kernel_pt = MOCK_KERNEL_PT.get().unwrap();
+
         let user_pt = kernel_pt.create_user_page_table();
 
         let guard = disable_preempt();
@@ -181,7 +199,7 @@ mod range_checks {
 
     #[ktest]
     fn range_check() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let valid_va = 0..PAGE_SIZE;
         let invalid_va = 0..(PAGE_SIZE + 1);
         let kernel_va = LINEAR_MAPPING_BASE_VADDR..(LINEAR_MAPPING_BASE_VADDR + PAGE_SIZE);
@@ -197,7 +215,7 @@ mod range_checks {
 
     #[ktest]
     fn boundary_conditions() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let preempt_guard = disable_preempt();
 
         // Tests an empty range.
@@ -205,7 +223,7 @@ mod range_checks {
         assert!(page_table.cursor_mut(&preempt_guard, &empty_range).is_err());
 
         // Tests an out-of-range virtual address.
-        let out_of_range = MAX_USERSPACE_VADDR..(MAX_USERSPACE_VADDR + PAGE_SIZE);
+        let out_of_range = 0xffff_8000_0000_0000..0xffff_8000_0001_0000;
         assert!(page_table
             .cursor_mut(&preempt_guard, &out_of_range)
             .is_err());
@@ -219,7 +237,7 @@ mod range_checks {
 
     #[ktest]
     fn maximum_page_table_mapping() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let max_address = 0x100000;
         let range = 0..max_address;
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@@ -245,7 +263,7 @@ mod range_checks {
 
     #[ktest]
     fn start_boundary_mapping() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = 0..PAGE_SIZE;
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let frame = FrameAllocOptions::default().alloc_frame().unwrap();
@@ -266,7 +284,7 @@ mod range_checks {
 
     #[ktest]
     fn end_boundary_mapping() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = (MAX_USERSPACE_VADDR - PAGE_SIZE)..MAX_USERSPACE_VADDR;
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let frame = FrameAllocOptions::default().alloc_frame().unwrap();
@@ -288,7 +306,7 @@ mod range_checks {
     #[ktest]
     #[should_panic]
     fn overflow_boundary_mapping() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range =
             (MAX_USERSPACE_VADDR - (PAGE_SIZE / 2))..(MAX_USERSPACE_VADDR + (PAGE_SIZE / 2));
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@@ -310,7 +328,7 @@ mod page_properties {
 
     /// Helper function to map a single page with given properties and verify the properties.
     #[track_caller]
     fn check_map_with_property(prop: PageProperty) {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = PAGE_SIZE..(PAGE_SIZE * 2);
         let frame = FrameAllocOptions::default().alloc_frame().unwrap();
         let preempt_guard = disable_preempt();
@@ -328,7 +346,7 @@ mod page_properties {
 
     #[ktest]
     fn uncacheable_policy_mapping() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let virtual_range = PAGE_SIZE..(PAGE_SIZE * 2);
         let frame = FrameAllocOptions::default().alloc_frame().unwrap();
         let preempt_guard = disable_preempt();
@@ -405,7 +423,7 @@ mod different_page_sizes {
 
     #[ktest]
     fn different_page_sizes() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let preempt_guard = disable_preempt();
 
         // 2MiB pages
@@ -438,7 +456,7 @@ mod overlapping_mappings {
 
     #[ktest]
     fn overlapping_mappings() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range1 = PAGE_SIZE..(PAGE_SIZE * 2);
         let range2 = PAGE_SIZE..(PAGE_SIZE * 3);
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@@ -470,7 +488,7 @@ mod overlapping_mappings {
 
     #[ktest]
     #[should_panic]
     fn unaligned_map() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = (PAGE_SIZE + 512)..(PAGE_SIZE * 2 + 512);
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let frame = FrameAllocOptions::default().alloc_frame().unwrap();
@@ -493,8 +511,8 @@ mod navigation {
     const FIRST_MAP_ADDR: Vaddr = PAGE_SIZE * 7;
     const SECOND_MAP_ADDR: Vaddr = PAGE_SIZE * 512 * 512;
 
-    fn setup_page_table_with_two_frames() -> (PageTable<UserMode>, Frame<()>, Frame<()>) {
-        let page_table = setup_page_table::<UserMode>();
+    fn setup_page_table_with_two_frames() -> (PageTable<UserPtConfig>, Frame<()>, Frame<()>) {
+        let page_table = setup_page_table::<UserPtConfig>();
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let preempt_guard = disable_preempt();
 
@@ -583,7 +601,7 @@ mod tracked_mapping {
 
     #[ktest]
     fn tracked_map_unmap() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = PAGE_SIZE..(PAGE_SIZE * 2);
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let preempt_guard = disable_preempt();
@@ -627,7 +645,7 @@ mod tracked_mapping {
 
     #[ktest]
     fn remapping_same_range() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = PAGE_SIZE..(PAGE_SIZE * 2);
         let initial_prop = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let new_prop = PageProperty::new_user(PageFlags::R, CachePolicy::Writeback);
@@ -666,7 +684,7 @@ mod untracked_mapping {
 
     #[ktest]
     fn untracked_map_unmap() {
-        let kernel_pt = setup_page_table::<KernelMode>();
+        let kernel_pt = setup_page_table::<KernelPtConfig>();
         let preempt_guard = disable_preempt();
 
         const UNTRACKED_OFFSET: usize = LINEAR_MAPPING_BASE_VADDR;
@@ -739,15 +757,13 @@ mod untracked_mapping {
 
     #[ktest]
     fn untracked_large_protect_query() {
-        let kernel_pt = PageTable::<KernelMode, PageTableEntry, VeryHugePagingConsts>::empty();
+        let kernel_pt = PageTable::<TestPtConfig>::empty();
         let preempt_guard = disable_preempt();
 
-        const UNTRACKED_OFFSET: usize = crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;
         let gmult = 512 * 512;
         let from_ppn = gmult - 512..gmult + gmult + 514;
         let to_ppn = gmult - 512 - 512..gmult + gmult - 512 + 514;
-        let from = UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.start
-            ..UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.end;
+        let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
         let to = PAGE_SIZE * to_ppn.start..PAGE_SIZE * to_ppn.end;
         let mapped_pa_of_va = |va: Vaddr| va - (from.start - to.start);
         let prop = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@@ -778,8 +794,8 @@ mod untracked_mapping {
             }
         }
         let protect_ppn_range = from_ppn.start + 18..from_ppn.start + 20;
-        let protect_va_range = UNTRACKED_OFFSET + PAGE_SIZE * protect_ppn_range.start
-            ..UNTRACKED_OFFSET + PAGE_SIZE * protect_ppn_range.end;
+        let protect_va_range =
+            PAGE_SIZE * protect_ppn_range.start..PAGE_SIZE * protect_ppn_range.end;
 
         protect_range(&kernel_pt, &protect_va_range, |p| p.flags -= PageFlags::W);
 
@@ -806,8 +822,8 @@ mod untracked_mapping {
             {
                 assert_item_is_untracked_map(
                     item,
-                    UNTRACKED_OFFSET + i * PAGE_SIZE,
-                    mapped_pa_of_va(UNTRACKED_OFFSET + i * PAGE_SIZE),
+                    i * PAGE_SIZE,
+                    mapped_pa_of_va(i * PAGE_SIZE),
                     PAGE_SIZE,
                     // Assumes protection splits huge pages if necessary.
                     PageProperty::new_user(PageFlags::R, CachePolicy::Writeback),
                 );
@@ -838,7 +854,7 @@ mod full_unmap_verification {
 
     #[ktest]
     fn full_unmap() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = 0..(PAGE_SIZE * 100);
         let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         let preempt_guard = disable_preempt();
@@ -880,7 +896,7 @@ mod protection_and_query {
 
     #[ktest]
     fn base_protect_query() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let from_ppn = 1..1000;
         let virtual_range = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
         let preempt_guard = disable_preempt();
@@ -931,7 +947,7 @@ mod protection_and_query {
 
     #[ktest]
     fn test_protect_next_empty_entry() {
-        let page_table = PageTable::<UserMode>::empty();
+        let page_table = PageTable::<UserPtConfig>::empty();
         let range = 0x1000..0x2000;
         let preempt_guard = disable_preempt();
 
@@ -946,7 +962,7 @@ mod protection_and_query {
 
     #[ktest]
     fn test_protect_next_child_table_with_children() {
-        let page_table = setup_page_table::<UserMode>();
+        let page_table = setup_page_table::<UserPtConfig>();
         let range = 0x1000..0x3000; // Range potentially spanning intermediate tables
         let preempt_guard = disable_preempt();
 
@@ -998,7 +1014,7 @@ mod boot_pt {
 
         // Confirms the mapping using page_walk.
         let root_paddr = boot_pt.root_address();
         assert_eq!(
-            unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from_virt + 1) },
+            unsafe { page_walk::<KernelPtConfig>(root_paddr, from_virt + 1) },
             Some((to_phys * PAGE_SIZE + 1, page_property))
         );
     }
@@ -1055,7 +1071,7 @@ mod boot_pt {
         let prop1 = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
         unsafe { boot_pt.map_base_page(from1, to_phys1, prop1) };
         assert_eq!(
-            unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from1 + 1) },
+            unsafe { page_walk::<KernelPtConfig>(root_paddr, from1 + 1) },
             Some((to_phys1 * PAGE_SIZE + 1, prop1))
         );
 
@@ -1064,7 +1080,7 @@ mod boot_pt {
         let expected_prop1_protected =
             PageProperty::new_user(PageFlags::RX, CachePolicy::Writeback);
         assert_eq!(
-            unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from1 + 1) },
+            unsafe { page_walk::<KernelPtConfig>(root_paddr, from1 + 1) },
             Some((to_phys1 * PAGE_SIZE + 1, expected_prop1_protected))
         );
 
@@ -1074,7 +1090,7 @@ mod boot_pt {
         let prop2 = PageProperty::new_user(PageFlags::RX, CachePolicy::Uncacheable);
         unsafe { boot_pt.map_base_page(from2, to_phys2, prop2) };
         assert_eq!(
-            unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) },
+            unsafe { page_walk::<KernelPtConfig>(root_paddr, from2 + 2) },
             Some((to_phys2 * PAGE_SIZE + 2, prop2))
         );
 
@@ -1083,7 +1099,7 @@ mod boot_pt {
         let expected_prop2_protected =
             PageProperty::new_user(PageFlags::RW, CachePolicy::Uncacheable);
         assert_eq!(
-            unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) },
+            unsafe { page_walk::<KernelPtConfig>(root_paddr, from2 + 2) },
             Some((to_phys2 * PAGE_SIZE + 2, expected_prop2_protected))
         );
     }

diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs
index 796ef0703..14e9f328c 100644
--- a/ostd/src/mm/vm_space.rs
+++ b/ostd/src/mm/vm_space.rs
@@ -11,6 +11,7 @@
 
 use core::{ops::Range, sync::atomic::Ordering};
 
+use super::page_table::PageTableConfig;
 use crate::{
     arch::mm::{current_page_table_paddr, PageTableEntry, PagingConsts},
     cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
@@ -18,7 +19,7 @@ use crate::{
     mm::{
         io::Fallible,
         kspace::KERNEL_PAGE_TABLE,
-        page_table::{self, PageTable, PageTableItem, UserMode},
+        page_table::{self, PageTable, PageTableItem},
         tlb::{TlbFlushOp, TlbFlusher},
         PageProperty, UFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
     },
@@ -64,7 +65,7 @@ use crate::{
 /// [`UserMode::execute`]: crate::user::UserMode::execute
 #[derive(Debug)]
 pub struct VmSpace {
-    pt: PageTable<UserMode, PageTableEntry, PagingConsts>,
+    pt: PageTable<UserPtConfig>,
     cpus: AtomicCpuSet,
 }
 
@@ -198,7 +199,7 @@ impl Default for VmSpace {
 /// It exclusively owns a sub-tree of the page table, preventing others from
 /// reading or modifying the same sub-tree. Two read-only cursors can not be
 /// created from the same virtual address range either.
-pub struct Cursor<'a>(page_table::Cursor<'a, UserMode, PageTableEntry, PagingConsts>);
+pub struct Cursor<'a>(page_table::Cursor<'a, UserPtConfig>);
 
 impl Iterator for Cursor<'_> {
     type Item = VmItem;
@@ -245,7 +246,7 @@ impl Cursor<'_> {
 /// It exclusively owns a sub-tree of the page table, preventing others from
 /// reading or modifying the same sub-tree.
 pub struct CursorMut<'a> {
-    pt_cursor: page_table::CursorMut<'a, UserMode, PageTableEntry, PagingConsts>,
+    pt_cursor: page_table::CursorMut<'a, UserPtConfig>,
     // We have a read lock so the CPU set in the flusher is always a superset
     // of actual activated CPUs.
     flusher: TlbFlusher<'a, DisabledPreemptGuard>,
@@ -476,3 +477,13 @@ impl TryFrom<PageTableItem> for VmItem {
         }
     }
 }
+
+#[derive(Clone, Debug)]
+pub(crate) struct UserPtConfig {}
+
+impl PageTableConfig for UserPtConfig {
+    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
+
+    type E = PageTableEntry;
+    type C = PagingConsts;
+}
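[As a closing usage sketch, modeled on the tests above rather than on any new API: with the unified parameter, a call site that previously named a mode plus two architecture parameters now names a single config. The function below is hypothetical and for illustration only:]

    // Before: PageTable::<UserMode, PageTableEntry, PagingConsts>::empty()
    // After, under this patch:
    fn example_usage() {
        let pt = PageTable::<UserPtConfig>::empty();
        let preempt_guard = disable_preempt();
        let va = 0..PAGE_SIZE;

        // Cursors are now `Cursor<'_, UserPtConfig>` / `CursorMut<'_, UserPtConfig>`.
        let mut cursor = pt.cursor_mut(&preempt_guard, &va).unwrap();
        // ... map, query, or protect through `cursor` as in the tests above.
    }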