Unify page table template parameters

Zhang Junyang
2025-04-29 18:16:06 +08:00
committed by Tate, Hongliang Tian
parent 22ccfb1f2b
commit 2c917ba383
16 changed files with 364 additions and 256 deletions

View File

@@ -22,6 +22,7 @@ impl PagingConstsTrait for PagingConsts {
     const BASE_PAGE_SIZE: usize = 4096;
     const NR_LEVELS: PagingLevel = 4;
     const ADDRESS_WIDTH: usize = 48;
+    const VA_SIGN_EXT: bool = true;
     const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 4;
     const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
 }

View File

@@ -8,7 +8,7 @@ use core::mem::size_of;
 use log::trace;
 use ostd_pod::Pod;
 
-use super::second_stage::{DeviceMode, PageTableEntry, PagingConsts};
+use super::second_stage::IommuPtConfig;
 use crate::{
     bus::pci::PciDeviceLocation,
     mm::{
@@ -107,7 +107,7 @@ impl RootTable {
     pub(super) fn specify_device_page_table(
         &mut self,
         device_id: PciDeviceLocation,
-        page_table: PageTable<DeviceMode, PageTableEntry, PagingConsts>,
+        page_table: PageTable<IommuPtConfig>,
     ) {
         let context_table = self.get_or_create_context_table(device_id);
 
@@ -241,7 +241,7 @@ pub enum AddressWidth {
 pub struct ContextTable {
     /// Total 32 devices, each device has 8 functions.
     entries_frame: Frame<()>,
-    page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
+    page_tables: BTreeMap<Paddr, PageTable<IommuPtConfig>>,
 }
 
 impl ContextTable {
@@ -259,7 +259,7 @@ impl ContextTable {
     fn get_or_create_page_table(
         &mut self,
         device: PciDeviceLocation,
-    ) -> &mut PageTable<DeviceMode, PageTableEntry, PagingConsts> {
+    ) -> &mut PageTable<IommuPtConfig> {
         let bus_entry = self
             .entries_frame
             .read_val::<ContextEntry>(
@@ -268,7 +268,7 @@ impl ContextTable {
             .unwrap();
 
         if !bus_entry.is_present() {
-            let table = PageTable::<DeviceMode, PageTableEntry, PagingConsts>::empty();
+            let table = PageTable::<IommuPtConfig>::empty();
             let address = table.root_paddr();
             self.page_tables.insert(address, table);
             let entry = ContextEntry(address as u128 | 3 | 0x1_0000_0000_0000_0000);

View File

@@ -2,7 +2,7 @@
 pub use context_table::RootTable;
 use log::{info, warn};
-use second_stage::{DeviceMode, PageTableEntry, PagingConsts};
+use second_stage::IommuPtConfig;
 use spin::Once;
 
 use super::IommuError;
@@ -84,7 +84,7 @@ pub fn init() {
     // Memory Region Reporting (RMRR) structures. These regions must be mapped for the hardware or
     // firmware to function properly. For more details, see Intel(R) Virtualization Technology for
     // Directed I/O (Revision 5.0), 3.16 Handling Requests to Reserved System Memory.
-    let page_table = PageTable::<DeviceMode, PageTableEntry, PagingConsts>::empty();
+    let page_table = PageTable::<IommuPtConfig>::empty();
     for table in PciDeviceLocation::all() {
         root_table.specify_device_page_table(table, unsafe { page_table.shallow_copy() })
     }

View File

@@ -7,8 +7,8 @@ use core::ops::Range;
 use crate::{
     mm::{
         page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
-        page_table::{PageTableEntryTrait, PageTableMode},
-        Paddr, PageProperty, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
+        page_table::{PageTableConfig, PageTableEntryTrait},
+        Paddr, PageProperty, PagingConstsTrait, PagingLevel, PodOnce,
     },
     util::marker::SameSizeAs,
     Pod,
@@ -17,20 +17,25 @@ use crate::{
 /// The page table used by iommu maps the device address
 /// space to the physical address space.
 #[derive(Clone, Debug)]
-pub struct DeviceMode {}
+pub(crate) struct IommuPtConfig {}
 
-impl PageTableMode for DeviceMode {
-    /// The device address width we currently support is 39-bit.
-    const VADDR_RANGE: Range<Vaddr> = 0..0x80_0000_0000;
+impl PageTableConfig for IommuPtConfig {
+    /// From section 3.6 in "Intel(R) Virtualization Technology for Directed I/O",
+    /// only low canonical addresses can be used.
+    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
+
+    type E = PageTableEntry;
+    type C = PagingConsts;
 }
 
 #[derive(Clone, Debug, Default)]
-pub(super) struct PagingConsts {}
+pub(crate) struct PagingConsts {}
 
 impl PagingConstsTrait for PagingConsts {
     const BASE_PAGE_SIZE: usize = 4096;
     const NR_LEVELS: PagingLevel = 3;
     const ADDRESS_WIDTH: usize = 39;
+    const VA_SIGN_EXT: bool = true;
     const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 1;
     const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
 }
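Editor's note (not part of the patch): with `BASE_PAGE_SIZE = 4096` (a 12-bit page offset), `NR_LEVELS = 3`, and 9 index bits per level, one top-level entry spans `1 << (12 + 2 * 9)` bytes, i.e. 1 GiB, so `TOP_LEVEL_INDEX_RANGE = 0..256` covers `0..0x40_0000_0000` (256 GiB), the low canonical half of the 39-bit space that the removed `VADDR_RANGE` constant (`0..0x80_0000_0000`) described in full.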

View File

@@ -30,6 +30,7 @@ impl PagingConstsTrait for PagingConsts {
     const BASE_PAGE_SIZE: usize = 4096;
     const NR_LEVELS: PagingLevel = 4;
     const ADDRESS_WIDTH: usize = 48;
+    const VA_SIGN_EXT: bool = true;
     const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 2;
     const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
 }

View File

@@ -9,6 +9,7 @@
 #![feature(core_intrinsics)]
 #![feature(coroutines)]
 #![feature(fn_traits)]
+#![feature(iter_advance_by)]
 #![feature(iter_from_coroutine)]
 #![feature(let_chains)]
 #![feature(linkage)]
@@ -18,7 +19,7 @@
 #![feature(ptr_sub_ptr)]
 #![feature(sync_unsafe_cell)]
 #![feature(trait_upcasting)]
-#![feature(iter_advance_by)]
+#![feature(unbounded_shifts)]
 #![expect(internal_features)]
 #![no_std]
 #![warn(missing_docs)]
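Editor's note: the newly enabled `unbounded_shifts` feature gates the `usize::unbounded_shl` call that this commit introduces in `page_table/mod.rs`. Unlike `<<`, `unbounded_shl` is also defined for shift amounts equal to or larger than the bit width and simply yields 0 there (for example, `1usize.unbounded_shl(64) == 0`), so the const evaluation of a page table's inclusive end address cannot panic even for a configuration whose computed shift reaches the full width of `usize`.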

View File

@@ -51,7 +51,7 @@ use super::{
         Frame, Segment,
     },
     page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
-    page_table::{KernelMode, PageTable},
+    page_table::{PageTable, PageTableConfig},
     Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE,
 };
 use crate::{
@@ -115,12 +115,21 @@ pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool {
     !(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr))
 }
 
+#[derive(Clone, Debug)]
+pub(crate) struct KernelPtConfig {}
+
+impl PageTableConfig for KernelPtConfig {
+    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 256..512;
+
+    type E = PageTableEntry;
+    type C = PagingConsts;
+}
+
 /// The kernel page table instance.
 ///
 /// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
 /// is unlikely to be activated.
-pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingConsts>> =
-    Once::new();
+pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();
 
 /// Initializes the kernel page table.
 ///
@@ -134,7 +143,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
     info!("Initializing the kernel page table");
 
     // Start to initialize the kernel page table.
-    let kpt = PageTable::<KernelMode>::new_kernel_page_table();
+    let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
     let preempt_guard = disable_preempt();
 
     // Do linear mappings for the kernel.
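Editor's note: `TOP_LEVEL_INDEX_RANGE = 256..512` selects the upper 256 of the 512 top-level entries, which is the same set of root slots the old code iterated over as `NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE` when populating and sharing the kernel half of the address space.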

View File

@@ -49,7 +49,7 @@ pub type PagingLevel = u8;
 
 /// A minimal set of constants that determines the paging system.
 /// This provides an abstraction over most paging modes in common architectures.
-pub(crate) trait PagingConstsTrait: Clone + Debug + Default + Send + Sync + 'static {
+pub(crate) trait PagingConstsTrait: Clone + Debug + Send + Sync + 'static {
     /// The smallest page size.
     /// This is also the page size at level 1 page tables.
     const BASE_PAGE_SIZE: usize;
@@ -71,6 +71,19 @@ pub(crate) trait PagingConstsTrait: Clone + Debug + Default + Send + Sync + 'sta
     /// The address width may be BASE_PAGE_SIZE.ilog2() + NR_LEVELS * IN_FRAME_INDEX_BITS.
     /// If it is shorter than that, the higher bits in the highest level are ignored.
     const ADDRESS_WIDTH: usize;
+
+    /// Whether virtual addresses are sign-extended.
+    ///
+    /// The sign bit of a [`Vaddr`] is the bit at index [`PagingConstsTrait::ADDRESS_WIDTH`] - 1.
+    /// If this constant is `true`, bits in a [`Vaddr`] that are higher than the sign bit must be
+    /// equal to the sign bit. If an address violates this rule, both the hardware and OSTD
+    /// should reject it.
+    ///
+    /// Otherwise, if this constant is `false`, the higher bits must be zero.
+    ///
+    /// Regardless of sign extension, a [`Vaddr`] is always treated as unsigned in calculations.
+    /// That means `0xffff_ffff_ffff_0000 < 0xffff_ffff_ffff_0001` is `true`.
+    const VA_SIGN_EXT: bool;
 }
 
 /// The page size
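To make the new constant concrete, here is a minimal, self-contained sketch (an editor's illustration, not code from this patch; `is_canonical` is a hypothetical helper and it assumes `ADDRESS_WIDTH` is smaller than the bit width of `usize`):

    /// Returns whether `va` satisfies the rule stated for `VA_SIGN_EXT`.
    fn is_canonical<C: PagingConstsTrait>(va: usize) -> bool {
        // Bits at indices ADDRESS_WIDTH..usize::BITS, i.e. the bits above the sign bit.
        let high_mask = !((1usize << C::ADDRESS_WIDTH) - 1);
        let sign_bit_set = (va >> (C::ADDRESS_WIDTH - 1)) & 1 != 0;
        if C::VA_SIGN_EXT && sign_bit_set {
            // The high bits must all replicate the sign bit.
            va & high_mask == high_mask
        } else {
            // The high bits must be zero.
            va & high_mask == 0
        }
    }

With the x86-64 constants (`ADDRESS_WIDTH = 48`, `VA_SIGN_EXT = true`), `0xffff_8000_0000_0000` passes while `0x0000_8000_0000_0000` fails, since the latter sets the sign bit without extending it.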

View File

@@ -11,21 +11,20 @@ use crate::{
     mm::{
         nr_subpage_per_huge, paddr_to_vaddr,
         page_table::{
-            load_pte, page_size, pte_index, Child, MapTrackingStatus, PageTable,
-            PageTableEntryTrait, PageTableGuard, PageTableMode, PageTableNodeRef,
-            PagingConstsTrait, PagingLevel,
+            load_pte, page_size, pte_index, Child, MapTrackingStatus, PageTable, PageTableConfig,
+            PageTableEntryTrait, PageTableGuard, PageTableNodeRef, PagingConstsTrait, PagingLevel,
         },
         Vaddr,
     },
     task::atomic_mode::InAtomicMode,
 };
 
-pub(super) fn lock_range<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
-    pt: &'rcu PageTable<M, E, C>,
+pub(super) fn lock_range<'rcu, C: PageTableConfig>(
+    pt: &'rcu PageTable<C>,
     guard: &'rcu dyn InAtomicMode,
     va: &Range<Vaddr>,
     new_pt_is_tracked: MapTrackingStatus,
-) -> Cursor<'rcu, M, E, C> {
+) -> Cursor<'rcu, C> {
     // The re-try loop of finding the sub-tree root.
     //
     // If we locked a stray node, we need to re-try. Otherwise, although
@@ -49,7 +48,7 @@ pub(super) fn lock_range<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: Pagi
     let mut path = core::array::from_fn(|_| None);
     path[guard_level as usize - 1] = Some(subtree_root);
 
-    Cursor::<'rcu, M, E, C> {
+    Cursor::<'rcu, C> {
         path,
         rcu_guard: guard,
         level: guard_level,
@@ -60,9 +59,7 @@
     }
 }
 
-pub(super) fn unlock_range<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
-    cursor: &mut Cursor<'_, M, E, C>,
-) {
+pub(super) fn unlock_range<C: PageTableConfig>(cursor: &mut Cursor<'_, C>) {
     for i in (0..cursor.guard_level as usize - 1).rev() {
         if let Some(guard) = cursor.path[i].take() {
             let _ = ManuallyDrop::new(guard);
@@ -92,18 +89,13 @@ pub(super) fn unlock_range<M: PageTableMode, E: PageTableEntryTrait, C: PagingCo
 /// If this function founds that a locked node is stray (because of racing with
 /// page table recycling), it will return `None`. The caller should retry in
 /// this case to lock the proper node.
-fn try_traverse_and_lock_subtree_root<
-    'rcu,
-    M: PageTableMode,
-    E: PageTableEntryTrait,
-    C: PagingConstsTrait,
->(
-    pt: &PageTable<M, E, C>,
+fn try_traverse_and_lock_subtree_root<'rcu, C: PageTableConfig>(
+    pt: &PageTable<C>,
     guard: &'rcu dyn InAtomicMode,
     va: &Range<Vaddr>,
     new_pt_is_tracked: MapTrackingStatus,
-) -> Option<PageTableGuard<'rcu, E, C>> {
-    let mut cur_node_guard: Option<PageTableGuard<E, C>> = None;
+) -> Option<PageTableGuard<'rcu, C>> {
+    let mut cur_node_guard: Option<PageTableGuard<C>> = None;
     let mut cur_pt_addr = pt.root.start_paddr();
     for cur_level in (1..=C::NR_LEVELS).rev() {
         let start_idx = pte_index::<C>(va.start, cur_level);
@@ -115,7 +107,7 @@ fn try_traverse_and_lock_subtree_root<
             break;
         }
 
-        let cur_pt_ptr = paddr_to_vaddr(cur_pt_addr) as *mut E;
+        let cur_pt_ptr = paddr_to_vaddr(cur_pt_addr) as *mut C::E;
         // SAFETY:
         // - The page table node is alive because (1) the root node is alive and
         //   (2) all child nodes cannot be recycled because we're in the RCU critical section.
@@ -136,7 +128,7 @@ fn try_traverse_and_lock_subtree_root<
         let mut pt_guard = cur_node_guard.take().unwrap_or_else(|| {
             // SAFETY: The node must be alive for at least `'rcu` since the
             // address is read from the page table node.
-            let node_ref = unsafe { PageTableNodeRef::<'rcu, E, C>::borrow_paddr(cur_pt_addr) };
+            let node_ref = unsafe { PageTableNodeRef::<'rcu, C>::borrow_paddr(cur_pt_addr) };
             node_ref.lock(guard)
         });
         if *pt_guard.stray_mut() {
@@ -162,7 +154,7 @@ fn try_traverse_and_lock_subtree_root<
     let mut pt_guard = cur_node_guard.unwrap_or_else(|| {
         // SAFETY: The node must be alive for at least `'rcu` since the
         // address is read from the page table node.
-        let node_ref = unsafe { PageTableNodeRef::<'rcu, E, C>::borrow_paddr(cur_pt_addr) };
+        let node_ref = unsafe { PageTableNodeRef::<'rcu, C>::borrow_paddr(cur_pt_addr) };
         node_ref.lock(guard)
     });
     if *pt_guard.stray_mut() {
@@ -178,9 +170,9 @@ fn try_traverse_and_lock_subtree_root<
 /// must be within the range of the `cur_node`. The range must not be empty.
 ///
 /// The function will forget all the [`PageTableGuard`] objects in the sub-tree.
-fn dfs_acquire_lock<E: PageTableEntryTrait, C: PagingConstsTrait>(
+fn dfs_acquire_lock<C: PageTableConfig>(
     guard: &dyn InAtomicMode,
-    cur_node: &mut PageTableGuard<'_, E, C>,
+    cur_node: &mut PageTableGuard<'_, C>,
     cur_node_va: Vaddr,
     va_range: Range<Vaddr>,
 ) {
@@ -215,9 +207,9 @@ fn dfs_acquire_lock<E: PageTableEntryTrait, C: PagingConstsTrait>(
 ///
 /// The caller must ensure that the nodes in the specified sub-tree are locked
 /// and all guards are forgotten.
-unsafe fn dfs_release_lock<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait>(
+unsafe fn dfs_release_lock<'rcu, C: PageTableConfig>(
     guard: &'rcu dyn InAtomicMode,
-    mut cur_node: PageTableGuard<'rcu, E, C>,
+    mut cur_node: PageTableGuard<'rcu, C>,
     cur_node_va: Vaddr,
     va_range: Range<Vaddr>,
 ) {
@@ -261,9 +253,9 @@ unsafe fn dfs_release_lock<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait>(
 ///
 /// This function must not be called upon a shared node, e.g., the second-
 /// top level nodes that the kernel space and user space share.
-pub(super) unsafe fn dfs_mark_stray_and_unlock<E: PageTableEntryTrait, C: PagingConstsTrait>(
+pub(super) unsafe fn dfs_mark_stray_and_unlock<C: PageTableConfig>(
     rcu_guard: &dyn InAtomicMode,
-    mut sub_tree: PageTableGuard<E, C>,
+    mut sub_tree: PageTableGuard<C>,
 ) -> usize {
     *sub_tree.stray_mut() = true;

View File

@@ -34,9 +34,9 @@ use core::{any::TypeId, fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops:
 use align_ext::AlignExt;
 
 use super::{
-    page_size, pte_index, Child, Entry, KernelMode, MapTrackingStatus, PageTable,
-    PageTableEntryTrait, PageTableError, PageTableGuard, PageTableMode, PagingConstsTrait,
-    PagingLevel, UserMode,
+    is_valid_range, page_size, pte_index, Child, Entry, KernelPtConfig, MapTrackingStatus,
+    PageTable, PageTableConfig, PageTableError, PageTableGuard, PagingConstsTrait, PagingLevel,
+    UserPtConfig,
 };
 use crate::{
     mm::{
@@ -54,12 +54,12 @@ use crate::{
 /// A cursor is able to move to the next slot, to read page properties,
 /// and even to jump to a virtual address directly.
 #[derive(Debug)]
-pub struct Cursor<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub struct Cursor<'rcu, C: PageTableConfig> {
     /// The current path of the cursor.
     ///
     /// The level 1 page table lock guard is at index 0, and the level N page
     /// table lock guard is at index N - 1.
-    path: [Option<PageTableGuard<'rcu, E, C>>; MAX_NR_LEVELS],
+    path: [Option<PageTableGuard<'rcu, C>>; MAX_NR_LEVELS],
     /// The cursor should be used in a RCU read side critical section.
     rcu_guard: &'rcu dyn InAtomicMode,
     /// The level of the page table that the cursor currently points to.
@@ -72,7 +72,7 @@ pub struct Cursor<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConst
     va: Vaddr,
     /// The virtual address range that is locked.
     barrier_va: Range<Vaddr>,
-    _phantom: PhantomData<&'rcu PageTable<M, E, C>>,
+    _phantom: PhantomData<&'rcu PageTable<C>>,
 }
 
 /// The maximum value of `PagingConstsTrait::NR_LEVELS`.
@@ -106,18 +106,18 @@ pub enum PageTableItem {
     },
 }
 
-impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Cursor<'rcu, M, E, C> {
+impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> {
     /// Creates a cursor claiming exclusive access over the given range.
     ///
     /// The cursor created will only be able to query or jump within the given
     /// range. Out-of-bound accesses will result in panics or errors as return values,
     /// depending on the access method.
     pub fn new(
-        pt: &'rcu PageTable<M, E, C>,
+        pt: &'rcu PageTable<C>,
         guard: &'rcu dyn InAtomicMode,
         va: &Range<Vaddr>,
     ) -> Result<Self, PageTableError> {
-        if !M::covers(va) || va.is_empty() {
+        if !is_valid_range::<C>(va) || va.is_empty() {
             return Err(PageTableError::InvalidVaddrRange(va.start, va.end));
         }
         if va.start % C::BASE_PAGE_SIZE != 0 || va.end % C::BASE_PAGE_SIZE != 0 {
@@ -125,8 +125,7 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Curso
         }
 
         const { assert!(C::NR_LEVELS as usize <= MAX_NR_LEVELS) };
-
-        let new_pt_is_tracked = if should_map_as_tracked::<M>(va.start) {
+        let new_pt_is_tracked = if should_map_as_tracked::<C>(va.start) {
             MapTrackingStatus::Tracked
         } else {
             MapTrackingStatus::Untracked
@@ -325,15 +324,15 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Curso
     }
 
     /// Goes down a level to a child page table.
-    fn push_level(&mut self, child_guard: PageTableGuard<'rcu, E, C>) {
+    fn push_level(&mut self, child_pt: PageTableGuard<'rcu, C>) {
         self.level -= 1;
-        debug_assert_eq!(self.level, child_guard.level());
+        debug_assert_eq!(self.level, child_pt.level());
 
-        let old = self.path[self.level as usize - 1].replace(child_guard);
+        let old = self.path[self.level as usize - 1].replace(child_pt);
         debug_assert!(old.is_none());
     }
 
-    fn cur_entry(&mut self) -> Entry<'_, 'rcu, E, C> {
+    fn cur_entry(&mut self) -> Entry<'_, 'rcu, C> {
         let node = self.path[self.level as usize - 1].as_mut().unwrap();
         node.entry(pte_index::<C>(self.va, self.level))
     }
@@ -346,15 +345,13 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Curso
     }
 }
 
-impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Drop for Cursor<'_, M, E, C> {
+impl<C: PageTableConfig> Drop for Cursor<'_, C> {
     fn drop(&mut self) {
         locking::unlock_range(self);
     }
 }
 
-impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
-    for Cursor<'_, M, E, C>
-{
+impl<C: PageTableConfig> Iterator for Cursor<'_, C> {
     type Item = PageTableItem;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -373,20 +370,16 @@ impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
 /// in a page table can only be accessed by one cursor, regardless of the
 /// mutability of the cursor.
 #[derive(Debug)]
-pub struct CursorMut<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
-    Cursor<'rcu, M, E, C>,
-);
+pub struct CursorMut<'rcu, C: PageTableConfig>(Cursor<'rcu, C>);
 
-impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
-    CursorMut<'rcu, M, E, C>
-{
+impl<'rcu, C: PageTableConfig> CursorMut<'rcu, C> {
     /// Creates a cursor claiming exclusive access over the given range.
     ///
     /// The cursor created will only be able to map, query or jump within the given
     /// range. Out-of-bound accesses will result in panics or errors as return values,
     /// depending on the access method.
     pub(super) fn new(
-        pt: &'rcu PageTable<M, E, C>,
+        pt: &'rcu PageTable<C>,
         guard: &'rcu dyn InAtomicMode,
         va: &Range<Vaddr>,
     ) -> Result<Self, PageTableError> {
@@ -452,7 +445,7 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
                 || self.0.va % page_size::<C>(self.0.level) != 0
                 || self.0.va + page_size::<C>(self.0.level) > end
             {
-                debug_assert!(should_map_as_tracked::<M>(self.0.va));
+                debug_assert!(should_map_as_tracked::<C>(self.0.va));
                 let mut cur_entry = self.0.cur_entry();
                 match cur_entry.to_ref() {
                     Child::PageTableRef(pt) => {
@@ -537,8 +530,8 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
             // We ensure not mapping in reserved kernel shared tables or releasing it.
             // Although it may be an invariant for all architectures and will be optimized
             // out by the compiler since `C::NR_LEVELS - 1 > C::HIGHEST_TRANSLATION_LEVEL`.
-            let is_kernel_shared_node =
-                TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.0.level >= C::NR_LEVELS - 1;
+            let is_kernel_shared_node = TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
+                && self.0.level >= C::NR_LEVELS - 1;
             if self.0.level > C::HIGHEST_TRANSLATION_LEVEL
                 || is_kernel_shared_node
                 || self.0.va % page_size::<C>(self.0.level) != 0
@@ -572,10 +565,9 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
                 continue;
             }
 
+            let level = self.0.level;
+
             // Map the current page.
-            debug_assert!(!should_map_as_tracked::<M>(self.0.va));
-            let level = self.0.level;
-
+            debug_assert!(!should_map_as_tracked::<C>(self.0.va));
             let mut cur_entry = self.0.cur_entry();
             let _ = cur_entry.replace(Child::Untracked(pa, level, prop));
@@ -636,7 +628,7 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
                 }
                 Child::PageTable(pt) => {
                     assert!(
-                        !(TypeId::of::<M>() == TypeId::of::<KernelMode>()
+                        !(TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
                             && self.0.level == C::NR_LEVELS),
                         "Unmapping shared kernel page table nodes"
                     );
@@ -706,8 +698,8 @@ impl<'rcu, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
     }
 }
 
-fn should_map_as_tracked<M: PageTableMode>(va: Vaddr) -> bool {
-    (TypeId::of::<M>() == TypeId::of::<KernelMode>()
-        || TypeId::of::<M>() == TypeId::of::<UserMode>())
+fn should_map_as_tracked<C: PageTableConfig>(va: Vaddr) -> bool {
+    TypeId::of::<C>() == TypeId::of::<KernelPtConfig>()
         && crate::mm::kspace::should_map_as_tracked(va)
+        || TypeId::of::<C>() == TypeId::of::<UserPtConfig>()
 }
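Editor's note: given operator precedence (`&&` binds tighter than `||`), the rewritten predicate reads as "a `KernelPtConfig` mapping is tracked only where `kspace::should_map_as_tracked(va)` says so, a `UserPtConfig` mapping is always tracked, and any other config (such as `IommuPtConfig`) is untracked". The `TypeId` comparison is possible because `PageTableConfig` requires `'static`; a minimal standalone sketch of the same dispatch pattern (hypothetical names, not from this patch):

    use core::any::TypeId;

    trait Config: 'static {}
    struct KernelCfg;
    struct UserCfg;
    impl Config for KernelCfg {}
    impl Config for UserCfg {}

    // Branches on the concrete config type supplied at the call site.
    fn is_kernel<C: Config>() -> bool {
        TypeId::of::<C>() == TypeId::of::<KernelCfg>()
    }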

View File

@@ -3,14 +3,13 @@
 use core::{
     fmt::Debug,
     intrinsics::transmute_unchecked,
-    marker::PhantomData,
-    ops::Range,
+    ops::{Range, RangeInclusive},
     sync::atomic::{AtomicUsize, Ordering},
 };
 
 use super::{
-    nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
-    PodOnce, Vaddr,
+    kspace::KernelPtConfig, nr_subpage_per_huge, page_prop::PageProperty, page_size,
+    vm_space::UserPtConfig, Paddr, PagingConstsTrait, PagingLevel, PodOnce, Vaddr,
 };
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
@@ -39,31 +38,93 @@ pub enum PageTableError {
     UnalignedVaddr,
 }
 
-/// This is a compile-time technique to force the frame developers to distinguish
-/// between the kernel global page table instance, process specific user page table
-/// instance, and device page table instances.
-pub trait PageTableMode: Clone + Debug + 'static {
-    /// The range of virtual addresses that the page table can manage.
-    const VADDR_RANGE: Range<Vaddr>;
-
-    /// Check if the given range is covered by the valid virtual address range.
-    fn covers(r: &Range<Vaddr>) -> bool {
-        Self::VADDR_RANGE.start <= r.start && r.end <= Self::VADDR_RANGE.end
-    }
-}
-
-#[derive(Clone, Debug)]
-pub struct UserMode {}
-
-impl PageTableMode for UserMode {
-    const VADDR_RANGE: Range<Vaddr> = 0..super::MAX_USERSPACE_VADDR;
-}
-
-#[derive(Clone, Debug)]
-pub struct KernelMode {}
-
-impl PageTableMode for KernelMode {
-    const VADDR_RANGE: Range<Vaddr> = super::KERNEL_VADDR_RANGE;
-}
+/// The configurations of a page table.
+///
+/// It abstracts away both the usage and the architecture specifics from the
+/// general page table implementation. For example:
+///  - the managed virtual address range;
+///  - the trackedness of physical mappings;
+///  - the PTE layout;
+///  - the number of page table levels, etc.
+pub(crate) trait PageTableConfig: Clone + Debug + Send + Sync + 'static {
+    /// The index range at the top level (`C::NR_LEVELS`) page table.
+    ///
+    /// When configured with this value, the [`PageTable`] instance will only
+    /// be allowed to manage the virtual address range that is covered by
+    /// this range. The range can be smaller than the actual allowed range
+    /// specified by the hardware MMU (limited by `C::ADDRESS_WIDTH`).
+    const TOP_LEVEL_INDEX_RANGE: Range<usize>;
+
+    type E: PageTableEntryTrait;
+    type C: PagingConstsTrait;
+}
+
+// Implement it so that we can comfortably use low-level functions
+// like `page_size::<C>` without typing `C::C` everywhere.
+impl<C: PageTableConfig> PagingConstsTrait for C {
+    const BASE_PAGE_SIZE: usize = C::C::BASE_PAGE_SIZE;
+    const NR_LEVELS: PagingLevel = C::C::NR_LEVELS;
+    const HIGHEST_TRANSLATION_LEVEL: PagingLevel = C::C::HIGHEST_TRANSLATION_LEVEL;
+    const PTE_SIZE: usize = C::C::PTE_SIZE;
+    const ADDRESS_WIDTH: usize = C::C::ADDRESS_WIDTH;
+    const VA_SIGN_EXT: bool = C::C::VA_SIGN_EXT;
+}
+
+/// Gets the managed virtual address range for the page table.
+///
+/// It returns a [`RangeInclusive`] because the end address, if it were
+/// [`Vaddr::MAX`], would overflow a [`Range<Vaddr>`].
+const fn vaddr_range<C: PageTableConfig>() -> RangeInclusive<Vaddr> {
+    const fn top_level_index_width<C: PageTableConfig>() -> usize {
+        C::ADDRESS_WIDTH - pte_index_bit_offset::<C>(C::NR_LEVELS)
+    }
+
+    const {
+        assert!(C::TOP_LEVEL_INDEX_RANGE.start < C::TOP_LEVEL_INDEX_RANGE.end);
+        assert!(top_level_index_width::<C>() <= nr_pte_index_bits::<C>());
+        assert!(C::TOP_LEVEL_INDEX_RANGE.start < 1 << top_level_index_width::<C>());
+        assert!(C::TOP_LEVEL_INDEX_RANGE.end <= 1 << top_level_index_width::<C>());
+    };
+
+    const fn pt_va_range_start<C: PageTableConfig>() -> Vaddr {
+        C::TOP_LEVEL_INDEX_RANGE.start << pte_index_bit_offset::<C>(C::NR_LEVELS)
+    }
+
+    const fn pt_va_range_end<C: PageTableConfig>() -> Vaddr {
+        C::TOP_LEVEL_INDEX_RANGE
+            .end
+            .unbounded_shl(pte_index_bit_offset::<C>(C::NR_LEVELS) as u32)
+            .wrapping_sub(1) // Inclusive end.
+    }
+
+    const fn sign_bit_of_va<C: PageTableConfig>(va: Vaddr) -> bool {
+        (va >> (C::ADDRESS_WIDTH - 1)) & 1 != 0
+    }
+
+    let mut start = pt_va_range_start::<C>();
+    let mut end = pt_va_range_end::<C>();
+
+    if C::VA_SIGN_EXT {
+        const {
+            assert!(
+                sign_bit_of_va::<C>(pt_va_range_start::<C>())
+                    == sign_bit_of_va::<C>(pt_va_range_end::<C>())
+            )
+        }
+
+        if sign_bit_of_va::<C>(pt_va_range_start::<C>()) {
+            start |= !0 ^ ((1 << C::ADDRESS_WIDTH) - 1);
+            end |= !0 ^ ((1 << C::ADDRESS_WIDTH) - 1);
+        }
+    }
+
+    start..=end
+}
+
+/// Checks if the given range is covered by the valid range of the page table.
+const fn is_valid_range<C: PageTableConfig>(r: &Range<Vaddr>) -> bool {
+    let va_range = vaddr_range::<C>();
+    *va_range.start() <= r.start && (r.end == 0 || r.end - 1 <= *va_range.end())
+}
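Editor's walk-through (not part of the patch): for `KernelPtConfig` on x86-64, `pte_index_bit_offset(4) = 12 + 9 * 3 = 39`, so `pt_va_range_start() = 256 << 39 = 0x0000_8000_0000_0000` and `pt_va_range_end() = (512 << 39) - 1 = 0x0000_ffff_ffff_ffff`. Both have the sign bit (bit 47) set, so with `VA_SIGN_EXT = true` they are OR-ed with `0xffff_0000_0000_0000`, and `vaddr_range` yields the familiar higher-half range `0xffff_8000_0000_0000..=0xffff_ffff_ffff_ffff`.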
 
 // Here are some const values that are determined by the paging constants.
@@ -75,25 +136,28 @@ const fn nr_pte_index_bits<C: PagingConstsTrait>() -> usize {
 /// The index of a VA's PTE in a page table node at the given level.
 const fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: PagingLevel) -> usize {
-    (va >> (C::BASE_PAGE_SIZE.ilog2() as usize + nr_pte_index_bits::<C>() * (level as usize - 1)))
-        & (nr_subpage_per_huge::<C>() - 1)
+    (va >> pte_index_bit_offset::<C>(level)) & (nr_subpage_per_huge::<C>() - 1)
+}
+
+/// The bit position, within a virtual address, of the PTE index for the given level.
+///
+/// This function returns the bit offset of the least significant bit of that index.
+/// Taking x86-64 as an example, `pte_index_bit_offset(2)` returns 21, which is
+/// 12 (the 4 KiB in-page offset) plus 9 (the index width in the level-1 table).
+const fn pte_index_bit_offset<C: PagingConstsTrait>(level: PagingLevel) -> usize {
+    C::BASE_PAGE_SIZE.ilog2() as usize + nr_pte_index_bits::<C>() * (level as usize - 1)
 }
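For example (editor's note), with the x86-64 constants the kernel base address `0xffff_8000_0000_0000` gives `pte_index(va, 4) = (va >> 39) & 511 = 256`, the first slot in `KernelPtConfig::TOP_LEVEL_INDEX_RANGE`, while `pte_index(va, 1) = (va >> 12) & 511 = 0`.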
 /// A handle to a page table.
 /// A page table can track the lifetime of the mapped physical pages.
 #[derive(Debug)]
-pub struct PageTable<
-    M: PageTableMode,
-    E: PageTableEntryTrait = PageTableEntry,
-    C: PagingConstsTrait = PagingConsts,
-> {
-    root: PageTableNode<E, C>,
-    _phantom: PhantomData<M>,
+pub struct PageTable<C: PageTableConfig> {
+    root: PageTableNode<C>,
 }
 
-impl PageTable<UserMode> {
+impl PageTable<UserPtConfig> {
     pub fn activate(&self) {
-        // SAFETY: The usermode page table is safe to activate since the kernel
+        // SAFETY: The user mode page table is safe to activate since the kernel
         // mappings are shared.
         unsafe {
             self.root.activate();
@@ -101,7 +165,7 @@ impl PageTable<UserMode> {
     }
 }
 
-impl PageTable<KernelMode> {
+impl PageTable<KernelPtConfig> {
     /// Create a new kernel page table.
     pub(crate) fn new_kernel_page_table() -> Self {
         let kpt = Self::empty();
@@ -111,10 +175,7 @@ impl PageTable<KernelMode> {
         let preempt_guard = disable_preempt();
         let mut root_node = kpt.root.borrow().lock(&preempt_guard);
 
-        const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
-        let kernel_space_range = NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE;
-
-        for i in kernel_space_range {
+        for i in KernelPtConfig::TOP_LEVEL_INDEX_RANGE {
             let mut root_entry = root_node.entry(i);
             let is_tracked = if super::kspace::should_map_as_tracked(
                 i * page_size::<PagingConsts>(PagingConsts::NR_LEVELS - 1),
@@ -136,7 +197,7 @@ impl PageTable<KernelMode> {
     ///
     /// This should be the only way to create the user page table, that is to
     /// duplicate the kernel page table with all the kernel mappings shared.
-    pub fn create_user_page_table(&self) -> PageTable<UserMode> {
+    pub(in crate::mm) fn create_user_page_table(&'static self) -> PageTable<UserPtConfig> {
         let new_root =
             PageTableNode::alloc(PagingConsts::NR_LEVELS, MapTrackingStatus::NotApplicable);
@@ -144,28 +205,25 @@ impl PageTable<KernelMode> {
         let mut root_node = self.root.borrow().lock(&preempt_guard);
         let mut new_node = new_root.borrow().lock(&preempt_guard);
 
-        // Make a shallow copy of the root node in the kernel space range.
-        // The user space range is not copied.
-        const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
-        for i in NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE {
+        for i in KernelPtConfig::TOP_LEVEL_INDEX_RANGE {
             let root_entry = root_node.entry(i);
             let child = root_entry.to_ref();
             let Child::PageTableRef(pt) = child else {
                 panic!("The kernel page table doesn't contain shared nodes");
             };
-            let pt_cloned = pt.clone();
 
-            let _ = new_node
-                .entry(i)
-                .replace(Child::PageTable(crate::sync::RcuDrop::new(pt_cloned)));
+            // We do not add additional reference count specifically for the
+            // shared kernel page tables. It requires user page tables to
+            // outlive the kernel page table, which is trivially true.
+            // See also `<PageTablePageMeta as AnyFrameMeta>::on_drop`.
+            let pt_addr = pt.start_paddr();
+            let pte = PageTableEntry::new_pt(pt_addr);
+            // SAFETY: The index is within the bounds and the new PTE is compatible.
+            unsafe { new_node.write_pte(i, pte) };
         }
         drop(new_node);
 
-        PageTable::<UserMode> {
-            root: new_root,
-            _phantom: PhantomData,
-        }
+        PageTable::<UserPtConfig> { root: new_root }
     }
 
     /// Protect the given virtual address range in the kernel page table.
@@ -193,14 +251,13 @@
     }
 }
 
-impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C> {
+impl<C: PageTableConfig> PageTable<C> {
     /// Create a new empty page table.
     ///
     /// Useful for the IOMMU page tables only.
     pub fn empty() -> Self {
         PageTable {
-            root: PageTableNode::<E, C>::alloc(C::NR_LEVELS, MapTrackingStatus::NotApplicable),
-            _phantom: PhantomData,
+            root: PageTableNode::<C>::alloc(C::NR_LEVELS, MapTrackingStatus::NotApplicable),
         }
     }
 
@@ -239,7 +296,7 @@
     #[cfg(ktest)]
     pub fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
         // SAFETY: The root node is a valid page table node so the address is valid.
-        unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
+        unsafe { page_walk::<C>(self.root_paddr(), vaddr) }
     }
 
     /// Create a new cursor exclusively accessing the virtual address range for mapping.
@@ -250,7 +307,7 @@
         &'rcu self,
         guard: &'rcu G,
         va: &Range<Vaddr>,
-    ) -> Result<CursorMut<'rcu, M, E, C>, PageTableError> {
+    ) -> Result<CursorMut<'rcu, C>, PageTableError> {
         CursorMut::new(self, guard.as_atomic_mode_guard(), va)
     }
 
@@ -263,7 +320,7 @@
         &'rcu self,
        guard: &'rcu G,
         va: &Range<Vaddr>,
-    ) -> Result<Cursor<'rcu, M, E, C>, PageTableError> {
+    ) -> Result<Cursor<'rcu, C>, PageTableError> {
         Cursor::new(self, guard.as_atomic_mode_guard(), va)
     }
 
@@ -273,7 +330,6 @@
     pub unsafe fn shallow_copy(&self) -> Self {
         PageTable {
             root: self.root.clone(),
-            _phantom: PhantomData,
         }
     }
 }
@@ -297,7 +353,7 @@
 /// To mitigate this problem, the page table nodes are by default not
 /// actively recycled, until we find an appropriate solution.
 #[cfg(ktest)]
-pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
+pub(super) unsafe fn page_walk<C: PageTableConfig>(
     root_paddr: Paddr,
     vaddr: Vaddr,
 ) -> Option<(Paddr, PageProperty)> {
@@ -310,7 +366,7 @@
         let node_addr = paddr_to_vaddr(root_paddr);
         let offset = pte_index::<C>(vaddr, cur_level);
         // SAFETY: The offset does not exceed the value of PAGE_SIZE.
-        unsafe { (node_addr as *const E).add(offset).read() }
+        unsafe { (node_addr as *const C::E).add(offset).read() }
     };
 
     while cur_level > 1 {
@@ -328,7 +384,7 @@
             let node_addr = paddr_to_vaddr(cur_pte.paddr());
             let offset = pte_index::<C>(vaddr, cur_level);
             // SAFETY: The offset does not exceed the value of PAGE_SIZE.
-            unsafe { (node_addr as *const E).add(offset).read() }
+            unsafe { (node_addr as *const C::E).add(offset).read() }
         };
     }

View File

@@ -4,12 +4,13 @@
 use core::{mem::ManuallyDrop, panic};
 
-use super::{MapTrackingStatus, PageTableEntryTrait, PageTableNode, PageTableNodeRef};
+use super::{MapTrackingStatus, PageTableEntryTrait, PageTableNode};
 use crate::{
     mm::{
         frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
         page_prop::PageProperty,
-        Paddr, PagingConstsTrait, PagingLevel,
+        page_table::{PageTableConfig, PageTableNodeRef},
+        Paddr, PagingLevel,
     },
     sync::RcuDrop,
 };
@@ -17,11 +18,11 @@ use crate::{
 /// A child of a page table node.
 // TODO: Distinguish between the reference and the owning child.
 #[derive(Debug)]
-pub(in crate::mm) enum Child<'a, E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub(in crate::mm) enum Child<'a, C: PageTableConfig> {
     /// A owning handle to a raw page table node.
-    PageTable(RcuDrop<PageTableNode<E, C>>),
+    PageTable(RcuDrop<PageTableNode<C>>),
     /// A reference of a child page table node.
-    PageTableRef(PageTableNodeRef<'a, E, C>),
+    PageTableRef(PageTableNodeRef<'a, C>),
     /// A mapped frame.
     Frame(Frame<dyn AnyFrameMeta>, PageProperty),
     /// Mapped frames that are not tracked by handles.
@@ -29,7 +30,7 @@ pub(in crate::mm) enum Child<'a, E: PageTableEntryTrait, C: PagingConstsTrait> {
     None,
 }
 
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<'_, E, C> {
+impl<C: PageTableConfig> Child<'_, C> {
     /// Returns whether the child does not map to anything.
     pub(in crate::mm) fn is_none(&self) -> bool {
         matches!(self, Child::None)
@@ -66,21 +67,21 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<'_, E, C> {
     /// Usually this is for recording the PTE into a page table node. When the
     /// child is needed again by reading the PTE of a page table node, extra
     /// information should be provided using the [`Child::from_pte`] method.
-    pub(super) fn into_pte(self) -> E {
+    pub(super) fn into_pte(self) -> C::E {
         match self {
             Child::PageTable(pt) => {
                 let pt = ManuallyDrop::new(pt);
-                E::new_pt(pt.start_paddr())
+                C::E::new_pt(pt.start_paddr())
             }
             Child::PageTableRef(_) => {
                 panic!("`PageTableRef` should not be converted to PTE");
             }
             Child::Frame(page, prop) => {
                 let level = page.map_level();
-                E::new_page(page.into_raw(), level, prop)
+                C::E::new_page(page.into_raw(), level, prop)
             }
-            Child::Untracked(pa, level, prop) => E::new_page(pa, level, prop),
-            Child::None => E::new_absent(),
+            Child::Untracked(pa, level, prop) => C::E::new_page(pa, level, prop),
+            Child::None => C::E::new_absent(),
         }
     }
 
@@ -97,7 +98,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<'_, E, C> {
     /// This method should be only used no more than once for a PTE that has
     /// been converted from a child using the [`Child::into_pte`] method.
     pub(super) unsafe fn from_pte(
-        pte: E,
+        pte: C::E,
         level: PagingLevel,
         is_tracked: MapTrackingStatus,
     ) -> Self {
@@ -141,7 +142,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<'_, E, C> {
     /// This method must not be used with a PTE that has been restored to a
     /// child using the [`Child::from_pte`] method.
     pub(super) unsafe fn ref_from_pte(
-        pte: &E,
+        pte: &C::E,
         level: PagingLevel,
         is_tracked: MapTrackingStatus,
     ) -> Self {

View File

@@ -4,11 +4,14 @@
 use core::mem::ManuallyDrop;
 
-use super::{
-    Child, MapTrackingStatus, PageTableEntryTrait, PageTableGuard, PageTableNode, PageTableNodeRef,
-};
+use super::{Child, MapTrackingStatus, PageTableEntryTrait, PageTableGuard, PageTableNode};
 use crate::{
-    mm::{nr_subpage_per_huge, page_prop::PageProperty, page_size, PagingConstsTrait},
+    mm::{
+        nr_subpage_per_huge,
+        page_prop::PageProperty,
+        page_size,
+        page_table::{PageTableConfig, PageTableNodeRef},
+    },
     sync::RcuDrop,
     task::atomic_mode::InAtomicMode,
 };
@@ -20,7 +23,7 @@ use crate::{
 /// This is a static reference to an entry in a node that does not account for
 /// a dynamic reference count to the child. It can be used to create a owned
 /// handle, which is a [`Child`].
-pub(in crate::mm) struct Entry<'a, 'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> {
+pub(in crate::mm) struct Entry<'a, 'rcu, C: PageTableConfig> {
     /// The page table entry.
     ///
     /// We store the page table entry here to optimize the number of reads from
@@ -28,14 +31,14 @@ pub(in crate::mm) struct Entry<'a, 'rcu, E: PageTableEntryTrait, C: PagingConsts
     /// other CPUs may modify the memory location for accessed/dirty bits. Such
     /// accesses will violate the aliasing rules of Rust and cause undefined
     /// behaviors.
-    pte: E,
+    pte: C::E,
     /// The index of the entry in the node.
     idx: usize,
     /// The node that contains the entry.
-    node: &'a mut PageTableGuard<'rcu, E, C>,
+    node: &'a mut PageTableGuard<'rcu, C>,
 }
 
-impl<'a, 'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> Entry<'a, 'rcu, E, C> {
+impl<'a, 'rcu, C: PageTableConfig> Entry<'a, 'rcu, C> {
     /// Returns if the entry does not map to anything.
     pub(in crate::mm) fn is_none(&self) -> bool {
         !self.pte.is_present()
@@ -47,7 +50,7 @@
     }
 
     /// Gets a reference to the child.
-    pub(in crate::mm) fn to_ref(&self) -> Child<'rcu, E, C> {
+    pub(in crate::mm) fn to_ref(&self) -> Child<'rcu, C> {
         // SAFETY: The entry structure represents an existent entry with the
         // right node information.
         unsafe { Child::ref_from_pte(&self.pte, self.node.level(), self.node.is_tracked()) }
@@ -87,7 +90,7 @@
     ///
     /// The method panics if the given child is not compatible with the node.
     /// The compatibility is specified by the [`Child::is_compatible`].
-    pub(in crate::mm) fn replace(&mut self, new_child: Child<E, C>) -> Child<E, C> {
+    pub(in crate::mm) fn replace(&mut self, new_child: Child<'rcu, C>) -> Child<'rcu, C> {
         assert!(new_child.is_compatible(self.node.level(), self.node.is_tracked()));
 
         // SAFETY: The entry structure represents an existent entry with the
@@ -122,13 +125,13 @@
         &mut self,
         guard: &'rcu dyn InAtomicMode,
         new_pt_is_tracked: MapTrackingStatus,
-    ) -> Option<PageTableGuard<'rcu, E, C>> {
+    ) -> Option<PageTableGuard<'rcu, C>> {
         if !(self.is_none() && self.node.level() > 1) {
             return None;
         }
 
         let level = self.node.level();
-        let new_page = PageTableNode::<E, C>::alloc(level - 1, new_pt_is_tracked);
+        let new_page = PageTableNode::<C>::alloc(level - 1, new_pt_is_tracked);
 
         let paddr = new_page.start_paddr();
         let _ = ManuallyDrop::new(new_page.borrow().lock(guard));
@@ -163,7 +166,7 @@
     pub(in crate::mm::page_table) fn split_if_untracked_huge(
         &mut self,
         guard: &'rcu dyn InAtomicMode,
-    ) -> Option<PageTableGuard<'rcu, E, C>> {
+    ) -> Option<PageTableGuard<'rcu, C>> {
         let level = self.node.level();
 
         if !(self.pte.is_last(level)
@@ -176,7 +179,7 @@
         let pa = self.pte.paddr();
         let prop = self.pte.prop();
 
-        let new_page = PageTableNode::<E, C>::alloc(level - 1, MapTrackingStatus::Untracked);
+        let new_page = PageTableNode::<C>::alloc(level - 1, MapTrackingStatus::Untracked);
         let mut pt_lock_guard = new_page.borrow().lock(guard);
 
         for i in 0..nr_subpage_per_huge::<C>() {
@@ -211,7 +214,7 @@
     /// # Safety
     ///
    /// The caller must ensure that the index is within the bounds of the node.
-    pub(super) unsafe fn new_at(guard: &'a mut PageTableGuard<'rcu, E, C>, idx: usize) -> Self {
+    pub(super) unsafe fn new_at(guard: &'a mut PageTableGuard<'rcu, C>, idx: usize) -> Self {
        // SAFETY: The index is within the bound.
        let pte = unsafe { guard.read_pte(idx) };
        Self {

View File

@@ -36,7 +36,7 @@ use core::{
 };
 
 pub(in crate::mm) use self::{child::Child, entry::Entry};
-use super::{nr_subpage_per_huge, PageTableEntryTrait};
+use super::{nr_subpage_per_huge, PageTableConfig, PageTableEntryTrait};
 use crate::{
     mm::{
         frame::{meta::AnyFrameMeta, Frame, FrameRef},
@@ -56,9 +56,9 @@
 ///
 /// [`PageTableNode`] is read-only. To modify the page table node, lock and use
 /// [`PageTableGuard`].
-pub(super) type PageTableNode<E, C> = Frame<PageTablePageMeta<E, C>>;
+pub(super) type PageTableNode<C> = Frame<PageTablePageMeta<C>>;
 
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C> {
+impl<C: PageTableConfig> PageTableNode<C> {
     pub(super) fn level(&self) -> PagingLevel {
         self.meta().level
     }
@@ -75,7 +75,7 @@
             .alloc_frame_with(meta)
             .expect("Failed to allocate a page table node");
         // The allocated frame is zeroed. Make sure zero is absent PTE.
-        debug_assert!(E::new_absent().as_bytes().iter().all(|&b| b == 0));
+        debug_assert_eq!(C::E::new_absent().as_usize(), 0);
 
         frame
     }
@@ -130,15 +130,15 @@
 }
 
 /// A reference to a page table node.
-pub(super) type PageTableNodeRef<'a, E, C> = FrameRef<'a, PageTablePageMeta<E, C>>;
+pub(super) type PageTableNodeRef<'a, C> = FrameRef<'a, PageTablePageMeta<C>>;
 
-impl<'a, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNodeRef<'a, E, C> {
+impl<'a, C: PageTableConfig> PageTableNodeRef<'a, C> {
     /// Locks the page table node.
     ///
     /// An atomic mode guard is required to
     /// 1. prevent deadlocks;
     /// 2. provide a lifetime (`'rcu`) that the nodes are guaranteed to outlive.
-    pub(super) fn lock<'rcu>(self, _guard: &'rcu dyn InAtomicMode) -> PageTableGuard<'rcu, E, C>
+    pub(super) fn lock<'rcu>(self, _guard: &'rcu dyn InAtomicMode) -> PageTableGuard<'rcu, C>
     where
         'a: 'rcu,
     {
@@ -151,7 +151,7 @@
             core::hint::spin_loop();
         }
 
-        PageTableGuard::<'rcu, E, C> { inner: self }
+        PageTableGuard::<'rcu, C> { inner: self }
     }
 
     /// Creates a new [`PageTableGuard`] without checking if the page table lock is held.
@@ -165,7 +165,7 @@
     pub(super) unsafe fn make_guard_unchecked<'rcu>(
         self,
         _guard: &'rcu dyn InAtomicMode,
-    ) -> PageTableGuard<'rcu, E, C>
+    ) -> PageTableGuard<'rcu, C>
     where
         'a: 'rcu,
     {
@@ -175,18 +175,18 @@
 
 /// A guard that holds the lock of a page table node.
 #[derive(Debug)]
-pub(super) struct PageTableGuard<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> {
-    inner: PageTableNodeRef<'rcu, E, C>,
+pub(super) struct PageTableGuard<'rcu, C: PageTableConfig> {
+    inner: PageTableNodeRef<'rcu, C>,
 }
 
-impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E, C> {
+impl<'rcu, C: PageTableConfig> PageTableGuard<'rcu, C> {
     /// Borrows an entry in the node at a given index.
     ///
     /// # Panics
     ///
     /// Panics if the index is not within the bound of
     /// [`nr_subpage_per_huge<C>`].
-    pub(super) fn entry(&mut self, idx: usize) -> Entry<'_, 'rcu, E, C> {
+    pub(super) fn entry(&mut self, idx: usize) -> Entry<'_, 'rcu, C> {
         assert!(idx < nr_subpage_per_huge::<C>());
         // SAFETY: The index is within the bound.
         unsafe { Entry::new_at(self, idx) }
@@ -213,9 +213,9 @@
     /// # Safety
     ///
     /// The caller must ensure that the index is within the bound.
-    unsafe fn read_pte(&self, idx: usize) -> E {
+    pub(super) unsafe fn read_pte(&self, idx: usize) -> C::E {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
-        let ptr = paddr_to_vaddr(self.start_paddr()) as *mut E;
+        let ptr = paddr_to_vaddr(self.start_paddr()) as *mut C::E;
         // SAFETY:
         // - The page table node is alive. The index is inside the bound, so the page table entry is valid.
// - All page table entries are aligned and accessed with atomic operations only. // - All page table entries are aligned and accessed with atomic operations only.
@ -235,9 +235,9 @@ impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E,
/// 1. The index must be within the bound; /// 1. The index must be within the bound;
/// 2. The PTE must represent a child compatible with this page table node /// 2. The PTE must represent a child compatible with this page table node
/// (see [`Child::is_compatible`]). /// (see [`Child::is_compatible`]).
unsafe fn write_pte(&mut self, idx: usize, pte: E) { pub(super) unsafe fn write_pte(&mut self, idx: usize, pte: C::E) {
debug_assert!(idx < nr_subpage_per_huge::<C>()); debug_assert!(idx < nr_subpage_per_huge::<C>());
let ptr = paddr_to_vaddr(self.start_paddr()) as *mut E; let ptr = paddr_to_vaddr(self.start_paddr()) as *mut C::E;
// SAFETY: // SAFETY:
// - The page table node is alive. The index is inside the bound, so the page table entry is valid. // - The page table node is alive. The index is inside the bound, so the page table entry is valid.
// - All page table entries are aligned and accessed with atomic operations only. // - All page table entries are aligned and accessed with atomic operations only.
@ -251,15 +251,15 @@ impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> PageTableGuard<'rcu, E,
} }
} }
impl<'rcu, E: PageTableEntryTrait, C: PagingConstsTrait> Deref for PageTableGuard<'rcu, E, C> { impl<'rcu, C: PageTableConfig> Deref for PageTableGuard<'rcu, C> {
type Target = PageTableNodeRef<'rcu, E, C>; type Target = PageTableNodeRef<'rcu, C>;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&self.inner &self.inner
} }
} }
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableGuard<'_, E, C> { impl<C: PageTableConfig> Drop for PageTableGuard<'_, C> {
fn drop(&mut self) { fn drop(&mut self) {
self.inner.meta().lock.store(0, Ordering::Release); self.inner.meta().lock.store(0, Ordering::Release);
} }
@ -268,7 +268,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableGuard<'_, E
/// The metadata of any kind of page table page. /// The metadata of any kind of page table page.
/// Make sure the generic parameters don't affect the memory layout. /// Make sure the generic parameters don't affect the memory layout.
#[derive(Debug)] #[derive(Debug)]
pub(in crate::mm) struct PageTablePageMeta<E: PageTableEntryTrait, C: PagingConstsTrait> { pub(in crate::mm) struct PageTablePageMeta<C: PageTableConfig> {
/// The number of valid PTEs. It is mutable if the lock is held. /// The number of valid PTEs. It is mutable if the lock is held.
pub nr_children: SyncUnsafeCell<u16>, pub nr_children: SyncUnsafeCell<u16>,
/// If the page table is detached from its parent. /// If the page table is detached from its parent.
@ -284,7 +284,7 @@ pub(in crate::mm) struct PageTablePageMeta<E: PageTableEntryTrait, C: PagingCons
pub lock: AtomicU8, pub lock: AtomicU8,
/// Whether the pages mapped by the node are tracked. /// Whether the pages mapped by the node are tracked.
pub is_tracked: MapTrackingStatus, pub is_tracked: MapTrackingStatus,
_phantom: core::marker::PhantomData<(E, C)>, _phantom: core::marker::PhantomData<C>,
} }
/// Describes whether the physical address recorded in this page table refers to a /// Describes whether the physical address recorded in this page table refers to a
@ -303,7 +303,7 @@ pub(in crate::mm) enum MapTrackingStatus {
Tracked, Tracked,
} }
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTablePageMeta<E, C> { impl<C: PageTableConfig> PageTablePageMeta<C> {
pub fn new(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self { pub fn new(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
Self { Self {
nr_children: SyncUnsafeCell::new(0), nr_children: SyncUnsafeCell::new(0),
@ -318,7 +318,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTablePageMeta<E, C> {
// SAFETY: We can read the page table node because the page table pages are // SAFETY: We can read the page table node because the page table pages are
// accessed as untyped memory. // accessed as untyped memory.
unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> AnyFrameMeta for PageTablePageMeta<E, C> { unsafe impl<C: PageTableConfig> AnyFrameMeta for PageTablePageMeta<C> {
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) { fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
let nr_children = self.nr_children.get_mut(); let nr_children = self.nr_children.get_mut();
@ -330,13 +330,20 @@ unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> AnyFrameMeta for PageT
let is_tracked = self.is_tracked; let is_tracked = self.is_tracked;
// Drop the children. // Drop the children.
while let Ok(pte) = reader.read_once::<E>() { let range = if level == C::NR_LEVELS {
// Here if we use directly `Child::from_pte` we would experience a C::TOP_LEVEL_INDEX_RANGE.clone()
// 50% increase in the overhead of the `drop` function. It seems that } else {
// Rust is very conservative about inlining and optimizing dead code 0..nr_subpage_per_huge::<C>()
// for `unsafe` code. So we manually inline the function here. };
reader.skip(range.start * size_of::<C::E>());
for _ in range {
// Non-atomic read is OK because we have mutable access.
let pte = reader.read_once::<C::E>().unwrap();
if pte.is_present() { if pte.is_present() {
let paddr = pte.paddr(); let paddr = pte.paddr();
// As a fast path, we can ensure that the type of the child frame
// is `Self` if the PTE points to a child page table. Then we don't
// need to check the vtable for the drop method.
if !pte.is_last(level) { if !pte.is_last(level) {
// SAFETY: The PTE points to a page table node. The ownership // SAFETY: The PTE points to a page table node. The ownership
// of the child is transferred to the child then dropped. // of the child is transferred to the child then dropped.
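
The rewritten drop path above no longer scans every slot unconditionally: at the root level it advances the reader past the slots before `C::TOP_LEVEL_INDEX_RANGE` and reads PTEs only inside that window, while every non-root node is still walked in full. A self-contained sketch of just that windowing decision (the constants assume the 4 KiB, 4-level layout used by the tests in this diff; illustrative only, not part of the commit):

use core::ops::Range;

const NR_ENTRIES: usize = 512; // nr_subpage_per_huge::<C>() with 4 KiB base pages
const NR_LEVELS: u8 = 4;

fn slots_to_visit(level: u8, top_level_index_range: Range<usize>) -> Range<usize> {
    if level == NR_LEVELS {
        // Only the root node is restricted to the configured window.
        top_level_index_range
    } else {
        // Every lower-level node owns all of its slots.
        0..NR_ENTRIES
    }
}

fn main() {
    // A user-space config such as `UserPtConfig` below uses 0..256.
    assert_eq!(slots_to_visit(NR_LEVELS, 0..256), 0..256);
    assert_eq!(slots_to_visit(3, 0..256), 0..NR_ENTRIES);
    // Before the loop, `on_drop` skips `range.start * size_of::<C::E>()`
    // bytes so that `read_once` starts exactly at the window.
}
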

View File

@ -3,7 +3,7 @@
use super::*; use super::*;
use crate::{ use crate::{
mm::{ mm::{
kspace::LINEAR_MAPPING_BASE_VADDR, kspace::{KernelPtConfig, LINEAR_MAPPING_BASE_VADDR},
page_prop::{CachePolicy, PageFlags}, page_prop::{CachePolicy, PageFlags},
FrameAllocOptions, MAX_USERSPACE_VADDR, PAGE_SIZE, FrameAllocOptions, MAX_USERSPACE_VADDR, PAGE_SIZE,
}, },
@ -17,14 +17,14 @@ mod test_utils {
/// Sets up an empty `PageTable` in the specified mode. /// Sets up an empty `PageTable` in the specified mode.
#[track_caller] #[track_caller]
pub fn setup_page_table<M: PageTableMode>() -> PageTable<M> { pub fn setup_page_table<C: PageTableConfig>() -> PageTable<C> {
PageTable::<M>::empty() PageTable::<C>::empty()
} }
/// Maps a range of virtual addresses to physical addresses with specified properties. /// Maps a range of virtual addresses to physical addresses with specified properties.
#[track_caller] #[track_caller]
pub fn map_range<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>( pub fn map_range<C: PageTableConfig>(
page_table: &PageTable<M, E, C>, page_table: &PageTable<C>,
virtual_range: Range<usize>, virtual_range: Range<usize>,
physical_range: Range<usize>, physical_range: Range<usize>,
page_property: PageProperty, page_property: PageProperty,
@ -38,7 +38,7 @@ mod test_utils {
/// Unmaps a range of virtual addresses. /// Unmaps a range of virtual addresses.
#[track_caller] #[track_caller]
pub fn unmap_range<M: PageTableMode>(page_table: &PageTable<M>, range: Range<usize>) { pub fn unmap_range<C: PageTableConfig>(page_table: &PageTable<C>, range: Range<usize>) {
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
unsafe { unsafe {
page_table page_table
@ -105,13 +105,24 @@ mod test_utils {
const NR_LEVELS: PagingLevel = 4; const NR_LEVELS: PagingLevel = 4;
const BASE_PAGE_SIZE: usize = PAGE_SIZE; const BASE_PAGE_SIZE: usize = PAGE_SIZE;
const ADDRESS_WIDTH: usize = 48; const ADDRESS_WIDTH: usize = 48;
const VA_SIGN_EXT: bool = true;
const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 3; const HIGHEST_TRANSLATION_LEVEL: PagingLevel = 3;
const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>(); const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
} }
#[derive(Clone, Debug)]
pub struct TestPtConfig;
impl PageTableConfig for TestPtConfig {
const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
type E = PageTableEntry;
type C = VeryHugePagingConsts;
}
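
With `TestPtConfig` in place, the helpers in this module need only the single config parameter. A usage sketch (a hypothetical test that merely reuses the helpers shown above; it is not part of this commit):

#[ktest]
fn very_huge_config_smoke() {
    let page_table = setup_page_table::<TestPtConfig>();
    let preempt_guard = disable_preempt();
    // The PTE type and the paging constants are both reached through the
    // one config parameter, so no extra generics need to be spelled out.
    assert!(page_table.cursor(&preempt_guard, &(0..PAGE_SIZE)).is_ok());
}
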
/// Applies a protection operation to a range of virtual addresses within a PageTable. /// Applies a protection operation to a range of virtual addresses within a PageTable.
pub fn protect_range<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>( pub fn protect_range<C: PageTableConfig>(
page_table: &PageTable<M, E, C>, page_table: &PageTable<C>,
range: &Range<Vaddr>, range: &Range<Vaddr>,
mut protect_op: impl FnMut(&mut PageProperty), mut protect_op: impl FnMut(&mut PageProperty),
) { ) {
@ -135,15 +146,16 @@ mod create_page_table {
#[ktest] #[ktest]
fn init_user_page_table() { fn init_user_page_table() {
let user_pt = setup_page_table::<UserMode>(); let user_pt = setup_page_table::<UserPtConfig>();
let preempt_guard = disable_preempt();
assert!(user_pt assert!(user_pt
.cursor(&disable_preempt(), &(0..MAX_USERSPACE_VADDR)) .cursor(&preempt_guard, &(0..MAX_USERSPACE_VADDR))
.is_ok()); .is_ok());
} }
#[ktest] #[ktest]
fn init_kernel_page_table() { fn init_kernel_page_table() {
let kernel_pt = setup_page_table::<KernelMode>(); let kernel_pt = setup_page_table::<KernelPtConfig>();
assert!(kernel_pt assert!(kernel_pt
.cursor( .cursor(
&disable_preempt(), &disable_preempt(),
@ -154,7 +166,13 @@ mod create_page_table {
#[ktest] #[ktest]
fn create_user_page_table() { fn create_user_page_table() {
let kernel_pt = PageTable::<KernelMode>::new_kernel_page_table(); use spin::Once;
// To make kernel PT `'static`, required for `create_user_page_table`.
static MOCK_KERNEL_PT: Once<PageTable<KernelPtConfig>> = Once::new();
MOCK_KERNEL_PT.call_once(PageTable::<KernelPtConfig>::new_kernel_page_table);
let kernel_pt = MOCK_KERNEL_PT.get().unwrap();
let user_pt = kernel_pt.create_user_page_table(); let user_pt = kernel_pt.create_user_page_table();
let guard = disable_preempt(); let guard = disable_preempt();
@ -181,7 +199,7 @@ mod range_checks {
#[ktest] #[ktest]
fn range_check() { fn range_check() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let valid_va = 0..PAGE_SIZE; let valid_va = 0..PAGE_SIZE;
let invalid_va = 0..(PAGE_SIZE + 1); let invalid_va = 0..(PAGE_SIZE + 1);
let kernel_va = LINEAR_MAPPING_BASE_VADDR..(LINEAR_MAPPING_BASE_VADDR + PAGE_SIZE); let kernel_va = LINEAR_MAPPING_BASE_VADDR..(LINEAR_MAPPING_BASE_VADDR + PAGE_SIZE);
@ -197,7 +215,7 @@ mod range_checks {
#[ktest] #[ktest]
fn boundary_conditions() { fn boundary_conditions() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
// Tests an empty range. // Tests an empty range.
@ -205,7 +223,7 @@ mod range_checks {
assert!(page_table.cursor_mut(&preempt_guard, &empty_range).is_err()); assert!(page_table.cursor_mut(&preempt_guard, &empty_range).is_err());
// Tests an out-of-range virtual address. // Tests an out-of-range virtual address.
let out_of_range = MAX_USERSPACE_VADDR..(MAX_USERSPACE_VADDR + PAGE_SIZE); let out_of_range = 0xffff_8000_0000_0000..0xffff_8000_0001_0000;
assert!(page_table assert!(page_table
.cursor_mut(&preempt_guard, &out_of_range) .cursor_mut(&preempt_guard, &out_of_range)
.is_err()); .is_err());
@ -219,7 +237,7 @@ mod range_checks {
#[ktest] #[ktest]
fn maximum_page_table_mapping() { fn maximum_page_table_mapping() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let max_address = 0x100000; let max_address = 0x100000;
let range = 0..max_address; let range = 0..max_address;
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@ -245,7 +263,7 @@ mod range_checks {
#[ktest] #[ktest]
fn start_boundary_mapping() { fn start_boundary_mapping() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = 0..PAGE_SIZE; let range = 0..PAGE_SIZE;
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let frame = FrameAllocOptions::default().alloc_frame().unwrap(); let frame = FrameAllocOptions::default().alloc_frame().unwrap();
@ -266,7 +284,7 @@ mod range_checks {
#[ktest] #[ktest]
fn end_boundary_mapping() { fn end_boundary_mapping() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = (MAX_USERSPACE_VADDR - PAGE_SIZE)..MAX_USERSPACE_VADDR; let range = (MAX_USERSPACE_VADDR - PAGE_SIZE)..MAX_USERSPACE_VADDR;
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let frame = FrameAllocOptions::default().alloc_frame().unwrap(); let frame = FrameAllocOptions::default().alloc_frame().unwrap();
@ -288,7 +306,7 @@ mod range_checks {
#[ktest] #[ktest]
#[should_panic] #[should_panic]
fn overflow_boundary_mapping() { fn overflow_boundary_mapping() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = let range =
(MAX_USERSPACE_VADDR - (PAGE_SIZE / 2))..(MAX_USERSPACE_VADDR + (PAGE_SIZE / 2)); (MAX_USERSPACE_VADDR - (PAGE_SIZE / 2))..(MAX_USERSPACE_VADDR + (PAGE_SIZE / 2));
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@ -310,7 +328,7 @@ mod page_properties {
/// Helper function to map a single page with given properties and verify the properties. /// Helper function to map a single page with given properties and verify the properties.
#[track_caller] #[track_caller]
fn check_map_with_property(prop: PageProperty) { fn check_map_with_property(prop: PageProperty) {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = PAGE_SIZE..(PAGE_SIZE * 2); let range = PAGE_SIZE..(PAGE_SIZE * 2);
let frame = FrameAllocOptions::default().alloc_frame().unwrap(); let frame = FrameAllocOptions::default().alloc_frame().unwrap();
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -328,7 +346,7 @@ mod page_properties {
#[ktest] #[ktest]
fn uncacheable_policy_mapping() { fn uncacheable_policy_mapping() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let virtual_range = PAGE_SIZE..(PAGE_SIZE * 2); let virtual_range = PAGE_SIZE..(PAGE_SIZE * 2);
let frame = FrameAllocOptions::default().alloc_frame().unwrap(); let frame = FrameAllocOptions::default().alloc_frame().unwrap();
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -405,7 +423,7 @@ mod different_page_sizes {
#[ktest] #[ktest]
fn different_page_sizes() { fn different_page_sizes() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
// 2MiB pages // 2MiB pages
@ -438,7 +456,7 @@ mod overlapping_mappings {
#[ktest] #[ktest]
fn overlapping_mappings() { fn overlapping_mappings() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range1 = PAGE_SIZE..(PAGE_SIZE * 2); let range1 = PAGE_SIZE..(PAGE_SIZE * 2);
let range2 = PAGE_SIZE..(PAGE_SIZE * 3); let range2 = PAGE_SIZE..(PAGE_SIZE * 3);
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@ -470,7 +488,7 @@ mod overlapping_mappings {
#[ktest] #[ktest]
#[should_panic] #[should_panic]
fn unaligned_map() { fn unaligned_map() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = (PAGE_SIZE + 512)..(PAGE_SIZE * 2 + 512); let range = (PAGE_SIZE + 512)..(PAGE_SIZE * 2 + 512);
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let frame = FrameAllocOptions::default().alloc_frame().unwrap(); let frame = FrameAllocOptions::default().alloc_frame().unwrap();
@ -493,8 +511,8 @@ mod navigation {
const FIRST_MAP_ADDR: Vaddr = PAGE_SIZE * 7; const FIRST_MAP_ADDR: Vaddr = PAGE_SIZE * 7;
const SECOND_MAP_ADDR: Vaddr = PAGE_SIZE * 512 * 512; const SECOND_MAP_ADDR: Vaddr = PAGE_SIZE * 512 * 512;
fn setup_page_table_with_two_frames() -> (PageTable<UserMode>, Frame<()>, Frame<()>) { fn setup_page_table_with_two_frames() -> (PageTable<UserPtConfig>, Frame<()>, Frame<()>) {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -583,7 +601,7 @@ mod tracked_mapping {
#[ktest] #[ktest]
fn tracked_map_unmap() { fn tracked_map_unmap() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = PAGE_SIZE..(PAGE_SIZE * 2); let range = PAGE_SIZE..(PAGE_SIZE * 2);
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -627,7 +645,7 @@ mod tracked_mapping {
#[ktest] #[ktest]
fn remapping_same_range() { fn remapping_same_range() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = PAGE_SIZE..(PAGE_SIZE * 2); let range = PAGE_SIZE..(PAGE_SIZE * 2);
let initial_prop = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let initial_prop = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let new_prop = PageProperty::new_user(PageFlags::R, CachePolicy::Writeback); let new_prop = PageProperty::new_user(PageFlags::R, CachePolicy::Writeback);
@ -666,7 +684,7 @@ mod untracked_mapping {
#[ktest] #[ktest]
fn untracked_map_unmap() { fn untracked_map_unmap() {
let kernel_pt = setup_page_table::<KernelMode>(); let kernel_pt = setup_page_table::<KernelPtConfig>();
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
const UNTRACKED_OFFSET: usize = LINEAR_MAPPING_BASE_VADDR; const UNTRACKED_OFFSET: usize = LINEAR_MAPPING_BASE_VADDR;
@ -739,15 +757,13 @@ mod untracked_mapping {
#[ktest] #[ktest]
fn untracked_large_protect_query() { fn untracked_large_protect_query() {
let kernel_pt = PageTable::<KernelMode, PageTableEntry, VeryHugePagingConsts>::empty(); let kernel_pt = PageTable::<TestPtConfig>::empty();
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
const UNTRACKED_OFFSET: usize = crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;
let gmult = 512 * 512; let gmult = 512 * 512;
let from_ppn = gmult - 512..gmult + gmult + 514; let from_ppn = gmult - 512..gmult + gmult + 514;
let to_ppn = gmult - 512 - 512..gmult + gmult - 512 + 514; let to_ppn = gmult - 512 - 512..gmult + gmult - 512 + 514;
let from = UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.start let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
..UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.end;
let to = PAGE_SIZE * to_ppn.start..PAGE_SIZE * to_ppn.end; let to = PAGE_SIZE * to_ppn.start..PAGE_SIZE * to_ppn.end;
let mapped_pa_of_va = |va: Vaddr| va - (from.start - to.start); let mapped_pa_of_va = |va: Vaddr| va - (from.start - to.start);
let prop = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let prop = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
@ -778,8 +794,8 @@ mod untracked_mapping {
} }
} }
let protect_ppn_range = from_ppn.start + 18..from_ppn.start + 20; let protect_ppn_range = from_ppn.start + 18..from_ppn.start + 20;
let protect_va_range = UNTRACKED_OFFSET + PAGE_SIZE * protect_ppn_range.start let protect_va_range =
..UNTRACKED_OFFSET + PAGE_SIZE * protect_ppn_range.end; PAGE_SIZE * protect_ppn_range.start..PAGE_SIZE * protect_ppn_range.end;
protect_range(&kernel_pt, &protect_va_range, |p| p.flags -= PageFlags::W); protect_range(&kernel_pt, &protect_va_range, |p| p.flags -= PageFlags::W);
@ -806,8 +822,8 @@ mod untracked_mapping {
{ {
assert_item_is_untracked_map( assert_item_is_untracked_map(
item, item,
UNTRACKED_OFFSET + i * PAGE_SIZE, i * PAGE_SIZE,
mapped_pa_of_va(UNTRACKED_OFFSET + i * PAGE_SIZE), mapped_pa_of_va(i * PAGE_SIZE),
PAGE_SIZE, // Assumes protection splits huge pages if necessary. PAGE_SIZE, // Assumes protection splits huge pages if necessary.
PageProperty::new_user(PageFlags::R, CachePolicy::Writeback), PageProperty::new_user(PageFlags::R, CachePolicy::Writeback),
); );
@ -838,7 +854,7 @@ mod full_unmap_verification {
#[ktest] #[ktest]
fn full_unmap() { fn full_unmap() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = 0..(PAGE_SIZE * 100); let range = 0..(PAGE_SIZE * 100);
let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let page_property = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -880,7 +896,7 @@ mod protection_and_query {
#[ktest] #[ktest]
fn base_protect_query() { fn base_protect_query() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let from_ppn = 1..1000; let from_ppn = 1..1000;
let virtual_range = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end; let virtual_range = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -931,7 +947,7 @@ mod protection_and_query {
#[ktest] #[ktest]
fn test_protect_next_empty_entry() { fn test_protect_next_empty_entry() {
let page_table = PageTable::<UserMode>::empty(); let page_table = PageTable::<UserPtConfig>::empty();
let range = 0x1000..0x2000; let range = 0x1000..0x2000;
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -946,7 +962,7 @@ mod protection_and_query {
#[ktest] #[ktest]
fn test_protect_next_child_table_with_children() { fn test_protect_next_child_table_with_children() {
let page_table = setup_page_table::<UserMode>(); let page_table = setup_page_table::<UserPtConfig>();
let range = 0x1000..0x3000; // Range potentially spanning intermediate tables let range = 0x1000..0x3000; // Range potentially spanning intermediate tables
let preempt_guard = disable_preempt(); let preempt_guard = disable_preempt();
@ -998,7 +1014,7 @@ mod boot_pt {
// Confirms the mapping using page_walk. // Confirms the mapping using page_walk.
let root_paddr = boot_pt.root_address(); let root_paddr = boot_pt.root_address();
assert_eq!( assert_eq!(
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from_virt + 1) }, unsafe { page_walk::<KernelPtConfig>(root_paddr, from_virt + 1) },
Some((to_phys * PAGE_SIZE + 1, page_property)) Some((to_phys * PAGE_SIZE + 1, page_property))
); );
} }
@ -1055,7 +1071,7 @@ mod boot_pt {
let prop1 = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback); let prop1 = PageProperty::new_user(PageFlags::RW, CachePolicy::Writeback);
unsafe { boot_pt.map_base_page(from1, to_phys1, prop1) }; unsafe { boot_pt.map_base_page(from1, to_phys1, prop1) };
assert_eq!( assert_eq!(
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from1 + 1) }, unsafe { page_walk::<KernelPtConfig>(root_paddr, from1 + 1) },
Some((to_phys1 * PAGE_SIZE + 1, prop1)) Some((to_phys1 * PAGE_SIZE + 1, prop1))
); );
@ -1064,7 +1080,7 @@ mod boot_pt {
let expected_prop1_protected = let expected_prop1_protected =
PageProperty::new_user(PageFlags::RX, CachePolicy::Writeback); PageProperty::new_user(PageFlags::RX, CachePolicy::Writeback);
assert_eq!( assert_eq!(
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from1 + 1) }, unsafe { page_walk::<KernelPtConfig>(root_paddr, from1 + 1) },
Some((to_phys1 * PAGE_SIZE + 1, expected_prop1_protected)) Some((to_phys1 * PAGE_SIZE + 1, expected_prop1_protected))
); );
@ -1074,7 +1090,7 @@ mod boot_pt {
let prop2 = PageProperty::new_user(PageFlags::RX, CachePolicy::Uncacheable); let prop2 = PageProperty::new_user(PageFlags::RX, CachePolicy::Uncacheable);
unsafe { boot_pt.map_base_page(from2, to_phys2, prop2) }; unsafe { boot_pt.map_base_page(from2, to_phys2, prop2) };
assert_eq!( assert_eq!(
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) }, unsafe { page_walk::<KernelPtConfig>(root_paddr, from2 + 2) },
Some((to_phys2 * PAGE_SIZE + 2, prop2)) Some((to_phys2 * PAGE_SIZE + 2, prop2))
); );
@ -1083,7 +1099,7 @@ mod boot_pt {
let expected_prop2_protected = let expected_prop2_protected =
PageProperty::new_user(PageFlags::RW, CachePolicy::Uncacheable); PageProperty::new_user(PageFlags::RW, CachePolicy::Uncacheable);
assert_eq!( assert_eq!(
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, from2 + 2) }, unsafe { page_walk::<KernelPtConfig>(root_paddr, from2 + 2) },
Some((to_phys2 * PAGE_SIZE + 2, expected_prop2_protected)) Some((to_phys2 * PAGE_SIZE + 2, expected_prop2_protected))
); );
} }
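
The `page_walk` call sites above also switch to the single config parameter. Together with the assertions, they suggest roughly the following signature (a sketch; visibility, bounds, and the safety contract are assumptions, and the body is omitted):

// Signature sketch inferred from the call sites; not the actual definition.
unsafe fn page_walk<C: PageTableConfig>(
    root_paddr: Paddr,
    vaddr: Vaddr,
) -> Option<(Paddr, PageProperty)> {
    // Software-walks the table rooted at `root_paddr` and, if `vaddr` is
    // mapped, returns the translated physical address and its properties.
    unimplemented!()
}
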

View File

@ -11,6 +11,7 @@
use core::{ops::Range, sync::atomic::Ordering}; use core::{ops::Range, sync::atomic::Ordering};
use super::page_table::PageTableConfig;
use crate::{ use crate::{
arch::mm::{current_page_table_paddr, PageTableEntry, PagingConsts}, arch::mm::{current_page_table_paddr, PageTableEntry, PagingConsts},
cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu}, cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
@ -18,7 +19,7 @@ use crate::{
mm::{ mm::{
io::Fallible, io::Fallible,
kspace::KERNEL_PAGE_TABLE, kspace::KERNEL_PAGE_TABLE,
page_table::{self, PageTable, PageTableItem, UserMode}, page_table::{self, PageTable, PageTableItem},
tlb::{TlbFlushOp, TlbFlusher}, tlb::{TlbFlushOp, TlbFlusher},
PageProperty, UFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR, PageProperty, UFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
}, },
@ -64,7 +65,7 @@ use crate::{
/// [`UserMode::execute`]: crate::user::UserMode::execute /// [`UserMode::execute`]: crate::user::UserMode::execute
#[derive(Debug)] #[derive(Debug)]
pub struct VmSpace { pub struct VmSpace {
pt: PageTable<UserMode>, pt: PageTable<UserPtConfig>,
cpus: AtomicCpuSet, cpus: AtomicCpuSet,
} }
@ -198,7 +199,7 @@ impl Default for VmSpace {
/// It exclusively owns a sub-tree of the page table, preventing others from /// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors cannot be /// reading or modifying the same sub-tree. Two read-only cursors cannot be
/// created from the same virtual address range either. /// created from the same virtual address range either.
pub struct Cursor<'a>(page_table::Cursor<'a, UserMode, PageTableEntry, PagingConsts>); pub struct Cursor<'a>(page_table::Cursor<'a, UserPtConfig>);
impl Iterator for Cursor<'_> { impl Iterator for Cursor<'_> {
type Item = VmItem; type Item = VmItem;
@ -245,7 +246,7 @@ impl Cursor<'_> {
/// It exclusively owns a sub-tree of the page table, preventing others from /// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. /// reading or modifying the same sub-tree.
pub struct CursorMut<'a> { pub struct CursorMut<'a> {
pt_cursor: page_table::CursorMut<'a, UserMode, PageTableEntry, PagingConsts>, pt_cursor: page_table::CursorMut<'a, UserPtConfig>,
// We have a read lock so the CPU set in the flusher is always a superset // We have a read lock so the CPU set in the flusher is always a superset
// of actual activated CPUs. // of actual activated CPUs.
flusher: TlbFlusher<'a, DisabledPreemptGuard>, flusher: TlbFlusher<'a, DisabledPreemptGuard>,
@ -476,3 +477,13 @@ impl TryFrom<PageTableItem> for VmItem {
} }
} }
} }
#[derive(Clone, Debug)]
pub(crate) struct UserPtConfig {}
impl PageTableConfig for UserPtConfig {
const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;
type E = PageTableEntry;
type C = PagingConsts;
}
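
A note on `TOP_LEVEL_INDEX_RANGE: 0..256` here: assuming the usual x86-64 layout (48-bit addresses, 4 levels, 512 entries per level), each root entry spans 512 GiB, so indexes 0..256 cover exactly the 128 TiB lower half of the sign-extended address space, which is consistent with the `0xffff_8000_0000_0000` out-of-range check in the tests above. A quick arithmetic check under that assumption:

// Back-of-the-envelope check; assumes 4 KiB base pages, 4 levels, 512 entries per level.
fn main() {
    let root_entry_span: u64 = 4096 * 512 * 512 * 512; // 512 GiB per root entry
    let user_half: u64 = root_entry_span * 256;        // root entries 0..256
    assert_eq!(user_half, 0x8000_0000_0000);           // 128 TiB of user address space
}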