Escalate page table consts to be paging consts

This commit is contained in:
Zhang Junyang
2024-05-04 11:02:49 +08:00
committed by Tate, Hongliang Tian
parent 2dbeb92326
commit 351e08c897
12 changed files with 167 additions and 163 deletions

View File

@ -6,7 +6,7 @@ use core::mem::size_of;
use log::warn;
use pod::Pod;
use super::second_stage::{DeviceMode, PageTableConsts, PageTableEntry, PageTableFlags};
use super::second_stage::{DeviceMode, PageTableEntry, PageTableFlags, PagingConsts};
use crate::{
bus::pci::PciDeviceLocation,
vm::{
@ -123,7 +123,7 @@ impl RootTable {
pub fn specify_device_page_table(
&mut self,
device_id: PciDeviceLocation,
page_table: PageTable<DeviceMode, PageTableEntry, PageTableConsts>,
page_table: PageTable<DeviceMode, PageTableEntry, PagingConsts>,
) {
let context_table = self.get_or_create_context_table(device_id);
@ -233,7 +233,7 @@ pub enum AddressWidth {
pub struct ContextTable {
/// Total 32 devices, each device has 8 functions.
entries_frame: VmFrame,
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PageTableConsts>>,
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
}
impl ContextTable {
@ -251,7 +251,7 @@ impl ContextTable {
fn get_or_create_page_table(
&mut self,
device: PciDeviceLocation,
) -> &mut PageTable<DeviceMode, PageTableEntry, PageTableConsts> {
) -> &mut PageTable<DeviceMode, PageTableEntry, PagingConsts> {
let bus_entry = self
.entries_frame
.read_val::<ContextEntry>(
@ -260,7 +260,7 @@ impl ContextTable {
.unwrap();
if !bus_entry.is_present() {
let table = PageTable::<DeviceMode, PageTableEntry, PageTableConsts>::empty();
let table = PageTable::<DeviceMode, PageTableEntry, PagingConsts>::empty();
let address = table.root_paddr();
self.page_tables.insert(address, table);
let entry = ContextEntry(address as u128 | 3 | 0x1_0000_0000_0000_0000);

View File

@ -6,7 +6,7 @@ mod remapping;
mod second_stage;
use log::info;
use second_stage::{DeviceMode, PageTableConsts, PageTableEntry};
use second_stage::{DeviceMode, PageTableEntry, PagingConsts};
use spin::Once;
use crate::{
@ -61,7 +61,7 @@ pub(crate) fn unmap(daddr: Daddr) -> Result<(), IommuError> {
pub(crate) fn init() -> Result<(), IommuError> {
let mut root_table = RootTable::new();
// For all PCI Device, use the same page table.
let page_table = PageTable::<DeviceMode, PageTableEntry, PageTableConsts>::empty();
let page_table = PageTable::<DeviceMode, PageTableEntry, PagingConsts>::empty();
for table in PciDeviceLocation::all() {
root_table.specify_device_page_table(table, unsafe { page_table.shallow_copy() })
}

View File

@ -6,10 +6,9 @@ use pod::Pod;
use crate::vm::{
page_table::{
CachePolicy, MapInfo, MapProperty, MapStatus, PageTableConstsTrait, PageTableEntryTrait,
PageTableMode,
CachePolicy, MapInfo, MapProperty, MapStatus, PageTableEntryTrait, PageTableMode,
},
Paddr, Vaddr, VmPerm,
Paddr, PagingConstsTrait, Vaddr, VmPerm,
};
/// The page table used by iommu maps the device address
@ -23,13 +22,13 @@ impl PageTableMode for DeviceMode {
}
#[derive(Debug)]
pub(super) struct PageTableConsts {}
pub(super) struct PagingConsts {}
impl PageTableConstsTrait for PageTableConsts {
impl PagingConstsTrait for PagingConsts {
const BASE_PAGE_SIZE: usize = 4096;
const NR_LEVELS: usize = 3;
const HIGHEST_TRANSLATION_LEVEL: usize = 1;
const ENTRY_SIZE: usize = core::mem::size_of::<PageTableEntry>();
const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
}
bitflags::bitflags! {

View File

@ -6,22 +6,20 @@ use pod::Pod;
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
use crate::vm::{
page_table::{
CachePolicy, MapInfo, MapProperty, MapStatus, PageTableConstsTrait, PageTableEntryTrait,
},
Paddr, Vaddr, VmPerm,
page_table::{CachePolicy, MapInfo, MapProperty, MapStatus, PageTableEntryTrait},
Paddr, PagingConstsTrait, Vaddr, VmPerm,
};
pub(crate) const NR_ENTRIES_PER_PAGE: usize = 512;
#[derive(Debug)]
pub struct PageTableConsts {}
pub struct PagingConsts {}
impl PageTableConstsTrait for PageTableConsts {
impl PagingConstsTrait for PagingConsts {
const BASE_PAGE_SIZE: usize = 4096;
const NR_LEVELS: usize = 4;
const HIGHEST_TRANSLATION_LEVEL: usize = 2;
const ENTRY_SIZE: usize = core::mem::size_of::<PageTableEntry>();
const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
}
bitflags::bitflags! {

View File

@ -5,15 +5,12 @@
use align_ext::AlignExt;
use spin::Once;
use super::page_table::PageTableConstsTrait;
use crate::{
arch::mm::{PageTableConsts, PageTableEntry},
vm::{
page_table::{page_walk, CachePolicy, KernelMode, MapProperty, PageTable},
space::VmPerm,
MemoryRegionType, Paddr, Vaddr, PAGE_SIZE,
},
use super::{
page_table::{nr_ptes_per_node, page_walk, CachePolicy, KernelMode, MapProperty, PageTable},
space::VmPerm,
MemoryRegionType, Paddr, Vaddr, PAGE_SIZE,
};
use crate::arch::mm::{PageTableEntry, PagingConsts};
/// The base address of the linear mapping of all physical
/// memory in the kernel address space.
@ -35,7 +32,7 @@ pub fn vaddr_to_paddr(va: Vaddr) -> Option<Paddr> {
} else {
let root_paddr = crate::arch::mm::current_page_table_paddr();
// Safety: the root page table is valid since we read it from the register.
unsafe { page_walk::<PageTableEntry, PageTableConsts>(root_paddr, va).map(|(pa, _)| pa) }
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, va).map(|(pa, _)| pa) }
}
}
@ -44,7 +41,7 @@ pub(crate) fn paddr_to_vaddr(pa: Paddr) -> usize {
pa + LINEAR_MAPPING_BASE_VADDR
}
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PageTableConsts>> =
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingConsts>> =
Once::new();
/// Initialize the kernel page table.
@ -58,7 +55,7 @@ pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PageTab
pub fn init_kernel_page_table() {
let kpt = PageTable::<KernelMode>::empty();
kpt.make_shared_tables(
PageTableConsts::NR_ENTRIES_PER_FRAME / 2..PageTableConsts::NR_ENTRIES_PER_FRAME,
nr_ptes_per_node::<PagingConsts>() / 2..nr_ptes_per_node::<PagingConsts>(),
);
let regions = crate::boot::memory_regions();
// Do linear mappings for the kernel.

View File

@ -20,7 +20,7 @@ pub(crate) mod page_table;
mod space;
use alloc::{borrow::ToOwned, vec::Vec};
use core::ops::Range;
use core::{fmt::Debug, ops::Range};
use spin::Once;
@ -35,8 +35,32 @@ pub use self::{
pub(crate) use self::{kspace::paddr_to_vaddr, page_table::PageTable};
use crate::boot::memory_region::{MemoryRegion, MemoryRegionType};
/// DEPRECATED: use the property of `VmFrame` instead.
/// The size of a [`VmFrame`].
pub const PAGE_SIZE: usize = 0x1000;
/// A minimal set of constants that determines the paging system.
/// This provides an abstraction over most paging modes in common architectures.
pub(crate) trait PagingConstsTrait: Debug + 'static {
/// The smallest page size.
/// This is also the page size at level 1 page tables.
const BASE_PAGE_SIZE: usize;
/// The number of levels in the page table.
/// The numbering of levels goes from deepest node to the root node. For example,
/// the level 1 to 5 on AMD64 corresponds to Page Tables, Page Directory Tables,
/// Page Directory Pointer Tables, Page-Map Level-4 Table, and Page-Map Level-5
/// Table, respectively.
const NR_LEVELS: usize;
/// The highest level that a PTE can be directly used to translate a VA.
/// This affects the largest page size supported by the page table.
const HIGHEST_TRANSLATION_LEVEL: usize;
/// The size of a PTE.
const PTE_SIZE: usize;
}
/// The maximum virtual address of user space (non-inclusive).
///
/// Typically, 64-bit systems have at least 48-bit virtual address space.

View File

@ -56,8 +56,9 @@ use core::{any::TypeId, ops::Range};
use align_ext::AlignExt;
use super::{
Child, KernelMode, MapInfo, MapOp, MapProperty, PageTable, PageTableConstsTrait,
PageTableEntryTrait, PageTableError, PageTableFrame, PageTableMode,
nr_ptes_per_node, page_size, pte_index, Child, KernelMode, MapInfo, MapOp, MapProperty,
PageTable, PageTableEntryTrait, PageTableError, PageTableFrame, PageTableMode,
PagingConstsTrait,
};
use crate::{
sync::{ArcSpinLockGuard, SpinLock},
@ -77,9 +78,9 @@ use crate::{
/// that we modify the tree while traversing it. We use a guard stack to
/// simulate the recursion, and adopt a page table locking protocol to
/// provide concurrency.
pub(crate) struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait>
pub(crate) struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
pt: &'a PageTable<M, E, C>,
@ -90,9 +91,9 @@ where
barrier_va: Range<Vaddr>, // virtual address range that is locked
}
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait> CursorMut<'a, M, E, C>
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> CursorMut<'a, M, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
/// Create a cursor exclusively owning the locks for the given range.
@ -131,8 +132,8 @@ where
// While going down, previous guards of too-high levels will be released.
loop {
let level_too_high = {
let start_idx = C::in_frame_index(va.start, cursor.level);
let end_idx = C::in_frame_index(va.end - 1, cursor.level);
let start_idx = pte_index::<C>(va.start, cursor.level);
let end_idx = pte_index::<C>(va.end - 1, cursor.level);
start_idx == end_idx
};
if !level_too_high || !cursor.cur_child().is_pt() {
@ -153,8 +154,8 @@ where
assert!(self.barrier_va.contains(&va));
assert!(va % C::BASE_PAGE_SIZE == 0);
loop {
let cur_node_start = self.va & !(C::page_size(self.level + 1) - 1);
let cur_node_end = cur_node_start + C::page_size(self.level + 1);
let cur_node_start = self.va & !(page_size::<C>(self.level + 1) - 1);
let cur_node_end = cur_node_start + page_size::<C>(self.level + 1);
// If the address is within the current node, we can jump directly.
if cur_node_start <= va && va < cur_node_end {
self.va = va;
@ -188,8 +189,8 @@ where
assert!(end <= self.barrier_va.end);
// Go down if not applicable.
while self.level > C::HIGHEST_TRANSLATION_LEVEL
|| self.va % C::page_size(self.level) != 0
|| self.va + C::page_size(self.level) > end
|| self.va % page_size::<C>(self.level) != 0
|| self.va + page_size::<C>(self.level) > end
{
self.level_down(Some(prop));
continue;
@ -235,9 +236,9 @@ where
TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.level >= C::NR_LEVELS - 1;
if self.level > C::HIGHEST_TRANSLATION_LEVEL
|| is_kernel_shared_node
|| self.va % C::page_size(self.level) != 0
|| self.va + C::page_size(self.level) > end
|| pa % C::page_size(self.level) != 0
|| self.va % page_size::<C>(self.level) != 0
|| self.va + page_size::<C>(self.level) > end
|| pa % page_size::<C>(self.level) != 0
{
self.level_down(Some(prop));
continue;
@ -247,7 +248,7 @@ where
let level = self.level;
self.cur_node_mut()
.set_child(idx, Child::Untracked(pa), Some(prop), level > 1);
pa += C::page_size(level);
pa += page_size::<C>(level);
self.move_forward();
}
}
@ -270,7 +271,7 @@ where
while self.va < end {
// Skip if it is already invalid.
if self.cur_child().is_none() {
if self.va + C::page_size(self.level) > end {
if self.va + page_size::<C>(self.level) > end {
break;
}
self.move_forward();
@ -282,8 +283,8 @@ where
let is_kernel_shared_node =
TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.level >= C::NR_LEVELS - 1;
if is_kernel_shared_node
|| self.va % C::page_size(self.level) != 0
|| self.va + C::page_size(self.level) > end
|| self.va % page_size::<C>(self.level) != 0
|| self.va + page_size::<C>(self.level) > end
{
self.level_down(Some(MapProperty::new_invalid()));
continue;
@ -330,8 +331,8 @@ where
self.level_down(None);
continue;
}
let vaddr_not_fit =
self.va % C::page_size(self.level) != 0 || self.va + C::page_size(self.level) > end;
let vaddr_not_fit = self.va % page_size::<C>(self.level) != 0
|| self.va + page_size::<C>(self.level) > end;
let cur_pte_info = self.read_cur_pte_info();
let protected_prop = op(cur_pte_info);
// Go down if the page size is too big and we are protecting part
@ -378,7 +379,7 @@ where
return Some(PageTableQueryResult::MappedUntyped {
va,
pa,
len: C::page_size(level),
len: page_size::<C>(level),
info: map_info,
});
}
@ -386,7 +387,7 @@ where
self.move_forward();
return Some(PageTableQueryResult::NotMapped {
va,
len: C::page_size(level),
len: page_size::<C>(level),
});
}
}
@ -413,9 +414,9 @@ where
/// If reached the end of a page table frame, it leads itself up to the next frame of the parent
/// frame if possible.
fn move_forward(&mut self) {
let page_size = C::page_size(self.level);
let page_size = page_size::<C>(self.level);
let next_va = self.va.align_down(page_size) + page_size;
while self.level < self.guard_level && C::in_frame_index(next_va, self.level) == 0 {
while self.level < self.guard_level && pte_index::<C>(next_va, self.level) == 0 {
self.level_up();
}
self.va = next_va;
@ -452,7 +453,7 @@ where
debug_assert!(self.level > 1);
// Check if the child frame exists.
let nxt_lvl_frame = {
let idx = C::in_frame_index(self.va, self.level);
let idx = pte_index::<C>(self.va, self.level);
let child = self.cur_child();
if let Child::PageTable(nxt_lvl_frame) = child {
Some(nxt_lvl_frame.clone())
@ -499,7 +500,7 @@ where
}
fn cur_idx(&self) -> usize {
C::in_frame_index(self.va, self.level)
pte_index::<C>(self.va, self.level)
}
fn cur_child(&self) -> &Child<E, C> {
@ -511,10 +512,9 @@ where
}
}
impl<M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait> Drop
for CursorMut<'_, M, E, C>
impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Drop for CursorMut<'_, M, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
fn drop(&mut self) {
@ -566,17 +566,17 @@ pub(crate) enum PageTableQueryResult {
/// The read-only cursor for traversal over the page table.
///
/// It implements the `Iterator` trait to provide a convenient way to query over the page table.
pub(crate) struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait>
pub(crate) struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
inner: CursorMut<'a, M, E, C>,
}
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait> Cursor<'a, M, E, C>
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Cursor<'a, M, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
pub(super) fn new(
@ -587,10 +587,10 @@ where
}
}
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait> Iterator
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
for Cursor<'a, M, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
type Item = PageTableQueryResult;

View File

@ -2,35 +2,35 @@
use alloc::{boxed::Box, sync::Arc};
use super::{MapInfo, MapProperty, PageTableConstsTrait, PageTableEntryTrait};
use super::{nr_ptes_per_node, page_size, MapInfo, MapProperty, PageTableEntryTrait};
use crate::{
sync::SpinLock,
vm::{Paddr, VmAllocOptions, VmFrame},
vm::{Paddr, PagingConstsTrait, VmAllocOptions, VmFrame},
};
/// A page table frame.
/// It's also frequently referred to as a page table in many architectural documentations.
/// Cloning a page table frame will create a deep copy of the page table.
#[derive(Debug)]
pub(super) struct PageTableFrame<E: PageTableEntryTrait, C: PageTableConstsTrait>
pub(super) struct PageTableFrame<E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
inner: VmFrame,
/// TODO: all the following fields can be removed if frame metadata is introduced.
/// Here we allow 2x space overhead each frame temporarily.
#[allow(clippy::type_complexity)]
children: Box<[Child<E, C>; C::NR_ENTRIES_PER_FRAME]>,
children: Box<[Child<E, C>; nr_ptes_per_node::<C>()]>,
nr_valid_children: usize,
}
pub(super) type PtfRef<E, C> = Arc<SpinLock<PageTableFrame<E, C>>>;
#[derive(Debug)]
pub(super) enum Child<E: PageTableEntryTrait, C: PageTableConstsTrait>
pub(super) enum Child<E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
PageTable(PtfRef<E, C>),
@ -40,9 +40,9 @@ where
None,
}
impl<E: PageTableEntryTrait, C: PageTableConstsTrait> Child<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
pub(super) fn is_pt(&self) -> bool {
@ -78,9 +78,9 @@ where
}
}
impl<E: PageTableEntryTrait, C: PageTableConstsTrait> Clone for Child<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Clone for Child<E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
/// This is a shallow copy.
@ -94,9 +94,9 @@ where
}
}
impl<E: PageTableEntryTrait, C: PageTableConstsTrait> PageTableFrame<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableFrame<E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
pub(super) fn new() -> Self {
@ -112,7 +112,7 @@ where
}
pub(super) fn child(&self, idx: usize) -> &Child<E, C> {
debug_assert!(idx < C::NR_ENTRIES_PER_FRAME);
debug_assert!(idx < nr_ptes_per_node::<C>());
&self.children[idx]
}
@ -129,15 +129,15 @@ where
/// Split the untracked huge page mapped at `idx` to smaller pages.
pub(super) fn split_untracked_huge(&mut self, cur_level: usize, idx: usize) {
debug_assert!(idx < C::NR_ENTRIES_PER_FRAME);
debug_assert!(idx < nr_ptes_per_node::<C>());
debug_assert!(cur_level > 1);
let Child::Untracked(pa) = self.children[idx] else {
panic!("split_untracked_huge: not an untyped huge page");
};
let info = self.read_pte_info(idx);
let mut new_frame = Self::new();
for i in 0..C::NR_ENTRIES_PER_FRAME {
let small_pa = pa + i * C::page_size(cur_level - 1);
for i in 0..nr_ptes_per_node::<C>() {
let small_pa = pa + i * page_size::<C>(cur_level - 1);
new_frame.set_child(
i,
Child::Untracked(small_pa),
@ -162,7 +162,7 @@ where
prop: Option<MapProperty>,
huge: bool,
) {
assert!(idx < C::NR_ENTRIES_PER_FRAME);
assert!(idx < nr_ptes_per_node::<C>());
// Safety: the index is within the bound and the PTE to be written is valid.
// And the physical address of PTE points to initialized memory.
// This applies to all the following `write_pte` invocations.
@ -211,7 +211,7 @@ where
}
fn read_pte(&self, idx: usize) -> E {
assert!(idx < C::NR_ENTRIES_PER_FRAME);
assert!(idx < nr_ptes_per_node::<C>());
// Safety: the index is within the bound and PTE is plain-old-data.
unsafe { (self.inner.as_ptr() as *const E).add(idx).read() }
}
@ -228,9 +228,9 @@ where
}
}
impl<E: PageTableEntryTrait, C: PageTableConstsTrait> Clone for PageTableFrame<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Clone for PageTableFrame<E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
/// Make a deep copy of the page table.

View File

@ -3,10 +3,10 @@
use alloc::sync::Arc;
use core::{fmt::Debug, marker::PhantomData, ops::Range, panic};
use super::{paddr_to_vaddr, Paddr, PagingConstsTrait, Vaddr, VmPerm};
use crate::{
arch::mm::{activate_page_table, PageTableConsts, PageTableEntry},
arch::mm::{activate_page_table, PageTableEntry, PagingConsts},
sync::SpinLock,
vm::{paddr_to_vaddr, Paddr, Vaddr, VmPerm},
};
mod properties;
@ -57,24 +57,47 @@ impl PageTableMode for KernelMode {
const VADDR_RANGE: Range<Vaddr> = super::KERNEL_BASE_VADDR..super::KERNEL_END_VADDR;
}
// Here are some const values that are determined by the paging constants.
/// The page size at a given level.
pub(crate) const fn page_size<C: PagingConstsTrait>(level: usize) -> usize {
C::BASE_PAGE_SIZE << (nr_pte_index_bits::<C>() * (level - 1))
}
/// The number of page table entries per page table frame.
pub(crate) const fn nr_ptes_per_node<C: PagingConstsTrait>() -> usize {
C::BASE_PAGE_SIZE / C::PTE_SIZE
}
/// The number of virtual address bits used to index a PTE in a frame.
const fn nr_pte_index_bits<C: PagingConstsTrait>() -> usize {
nr_ptes_per_node::<C>().ilog2() as usize
}
/// The index of a VA's PTE in a page table frame at the given level.
const fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: usize) -> usize {
va >> (C::BASE_PAGE_SIZE.ilog2() as usize + nr_pte_index_bits::<C>() * (level - 1))
& (nr_ptes_per_node::<C>() - 1)
}
/// A handle to a page table.
/// A page table can track the lifetime of the mapped physical frames.
#[derive(Debug)]
pub(crate) struct PageTable<
M: PageTableMode,
E: PageTableEntryTrait = PageTableEntry,
C: PageTableConstsTrait = PageTableConsts,
C: PagingConstsTrait = PagingConsts,
> where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
root_frame: PtfRef<E, C>,
_phantom: PhantomData<M>,
}
impl<E: PageTableEntryTrait, C: PageTableConstsTrait> PageTable<UserMode, E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<UserMode, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
pub(crate) fn activate(&self) {
@ -103,7 +126,7 @@ where
};
let root_frame = cursor.leak_root_guard().unwrap();
let mut new_root_frame = PageTableFrame::<E, C>::new();
let half_of_entries = C::NR_ENTRIES_PER_FRAME / 2;
let half_of_entries = nr_ptes_per_node::<C>() / 2;
for i in 0..half_of_entries {
// This is user space, deep copy the child.
match root_frame.child(i) {
@ -128,7 +151,7 @@ where
}
}
}
for i in half_of_entries..C::NR_ENTRIES_PER_FRAME {
for i in half_of_entries..nr_ptes_per_node::<C>() {
// This is kernel space, share the child.
new_root_frame.set_child(
i,
@ -144,9 +167,9 @@ where
}
}
impl<E: PageTableEntryTrait, C: PageTableConstsTrait> PageTable<KernelMode, E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<KernelMode, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
/// Create a new user page table.
@ -159,7 +182,7 @@ where
pub(crate) fn create_user_page_table(&self) -> PageTable<UserMode, E, C> {
let mut new_root_frame = PageTableFrame::<E, C>::new();
let root_frame = self.root_frame.lock();
for i in C::NR_ENTRIES_PER_FRAME / 2..C::NR_ENTRIES_PER_FRAME {
for i in nr_ptes_per_node::<C>() / 2..nr_ptes_per_node::<C>() {
new_root_frame.set_child(
i,
root_frame.child(i).clone(),
@ -180,10 +203,10 @@ where
/// instead of the virtual address range.
pub(crate) fn make_shared_tables(&self, root_index: Range<usize>) {
let start = root_index.start;
debug_assert!(start >= C::NR_ENTRIES_PER_FRAME / 2);
debug_assert!(start < C::NR_ENTRIES_PER_FRAME);
debug_assert!(start >= nr_ptes_per_node::<C>() / 2);
debug_assert!(start < nr_ptes_per_node::<C>());
let end = root_index.end;
debug_assert!(end <= C::NR_ENTRIES_PER_FRAME);
debug_assert!(end <= nr_ptes_per_node::<C>());
let mut root_frame = self.root_frame.lock();
for i in start..end {
let no_such_child = root_frame.child(i).is_none();
@ -205,9 +228,9 @@ where
}
}
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait> PageTable<M, E, C>
impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> PageTable<M, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
/// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
@ -296,9 +319,9 @@ where
}
}
impl<M: PageTableMode, E: PageTableEntryTrait, C: PageTableConstsTrait> Clone for PageTable<M, E, C>
impl<M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Clone for PageTable<M, E, C>
where
[(); C::NR_ENTRIES_PER_FRAME]:,
[(); nr_ptes_per_node::<C>()]:,
[(); C::NR_LEVELS]:,
{
fn clone(&self) -> Self {
@ -318,14 +341,14 @@ where
///
/// The caller must ensure that the root_paddr is a valid pointer to the root
/// page table frame.
pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PageTableConstsTrait>(
pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
root_paddr: Paddr,
vaddr: Vaddr,
) -> Option<(Paddr, MapInfo)> {
let mut cur_level = C::NR_LEVELS;
let mut cur_pte = {
let frame_addr = paddr_to_vaddr(root_paddr);
let offset = C::in_frame_index(vaddr, cur_level);
let offset = pte_index::<C>(vaddr, cur_level);
// Safety: The offset does not exceed the value of PAGE_SIZE.
unsafe { (frame_addr as *const E).add(offset).read() }
};
@ -341,7 +364,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PageTableConstsTrait>(
cur_level -= 1;
cur_pte = {
let frame_addr = paddr_to_vaddr(cur_pte.paddr());
let offset = C::in_frame_index(vaddr, cur_level);
let offset = pte_index::<C>(vaddr, cur_level);
// Safety: The offset does not exceed the value of PAGE_SIZE.
unsafe { (frame_addr as *const E).add(offset).read() }
};
@ -349,7 +372,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PageTableConstsTrait>(
if cur_pte.is_valid() {
Some((
cur_pte.paddr() + (vaddr & (C::page_size(cur_level) - 1)),
cur_pte.paddr() + (vaddr & (page_size::<C>(cur_level) - 1)),
cur_pte.info(),
))
} else {

View File

@ -4,44 +4,7 @@ use core::fmt::Debug;
use pod::Pod;
use crate::vm::{Paddr, Vaddr, VmPerm};
/// A minimal set of constants that determines the flags of the page table.
/// This provides an abstraction over most paging modes in common architectures.
pub trait PageTableConstsTrait: Debug + 'static {
/// The smallest page size.
const BASE_PAGE_SIZE: usize;
/// The number of levels in the page table.
/// The level 1 is the leaf level, and the level `NR_LEVELS` is the root level.
const NR_LEVELS: usize;
/// The highest level that a PTE can be directly used to translate a VA.
/// This affects the largest page size supported by the page table.
const HIGHEST_TRANSLATION_LEVEL: usize;
/// The size of a PTE.
const ENTRY_SIZE: usize;
// Here are some const values that are determined by the page table constants.
/// The number of PTEs per page table frame.
const NR_ENTRIES_PER_FRAME: usize = Self::BASE_PAGE_SIZE / Self::ENTRY_SIZE;
/// The number of bits used to index a PTE in a page table frame.
const IN_FRAME_INDEX_BITS: usize = Self::NR_ENTRIES_PER_FRAME.ilog2() as usize;
/// The index of a VA's PTE in a page table frame at the given level.
fn in_frame_index(va: Vaddr, level: usize) -> usize {
va >> (Self::BASE_PAGE_SIZE.ilog2() as usize + Self::IN_FRAME_INDEX_BITS * (level - 1))
& (Self::NR_ENTRIES_PER_FRAME - 1)
}
/// The page size at a given level.
fn page_size(level: usize) -> usize {
Self::BASE_PAGE_SIZE << (Self::IN_FRAME_INDEX_BITS * (level - 1))
}
}
use crate::vm::{Paddr, VmPerm};
bitflags::bitflags! {
/// The status of a memory mapping recorded by the hardware.

View File

@ -78,18 +78,18 @@ fn test_user_copy_on_write() {
type Qr = PageTableQueryResult;
#[derive(Debug)]
struct BasePageTableConsts {}
struct BasePagingConsts {}
impl PageTableConstsTrait for BasePageTableConsts {
impl PagingConstsTrait for BasePagingConsts {
const NR_LEVELS: usize = 4;
const BASE_PAGE_SIZE: usize = PAGE_SIZE;
const HIGHEST_TRANSLATION_LEVEL: usize = 1;
const ENTRY_SIZE: usize = core::mem::size_of::<PageTableEntry>();
const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
}
#[ktest]
fn test_base_protect_query() {
let pt = PageTable::<UserMode, PageTableEntry, BasePageTableConsts>::empty();
let pt = PageTable::<UserMode, PageTableEntry, BasePagingConsts>::empty();
let from_ppn = 1..1000;
let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
let to = PAGE_SIZE * 1000..PAGE_SIZE * 1999;
@ -115,18 +115,18 @@ fn test_base_protect_query() {
}
#[derive(Debug)]
struct VeryHugePageTableConsts {}
struct VeryHugePagingConsts {}
impl PageTableConstsTrait for VeryHugePageTableConsts {
impl PagingConstsTrait for VeryHugePagingConsts {
const NR_LEVELS: usize = 4;
const BASE_PAGE_SIZE: usize = PAGE_SIZE;
const HIGHEST_TRANSLATION_LEVEL: usize = 3;
const ENTRY_SIZE: usize = core::mem::size_of::<PageTableEntry>();
const PTE_SIZE: usize = core::mem::size_of::<PageTableEntry>();
}
#[ktest]
fn test_large_protect_query() {
let pt = PageTable::<UserMode, PageTableEntry, VeryHugePageTableConsts>::empty();
let pt = PageTable::<UserMode, PageTableEntry, VeryHugePagingConsts>::empty();
let gmult = 512 * 512;
let from_ppn = gmult - 512..gmult + gmult + 514;
let to_ppn = gmult - 512 - 512..gmult + gmult - 512 + 514;

View File

@ -9,13 +9,13 @@ use super::{
is_page_aligned,
kspace::KERNEL_PAGE_TABLE,
page_table::{
MapInfo, MapOp, PageTable, PageTableConstsTrait, PageTableMode,
PageTableQueryResult as PtQr, PageTableQueryResult, UserMode,
MapInfo, MapOp, PageTable, PageTableMode, PageTableQueryResult as PtQr,
PageTableQueryResult, UserMode,
},
VmFrameVec, VmIo, PAGE_SIZE,
PagingConstsTrait, VmFrameVec, VmIo, PAGE_SIZE,
};
use crate::{
arch::mm::{PageTableConsts, PageTableEntry},
arch::mm::{PageTableEntry, PagingConsts},
prelude::*,
vm::{
page_table::{CachePolicy, Cursor, MapProperty},
@ -250,7 +250,7 @@ impl VmMapOptions {
pub fn new() -> Self {
Self {
addr: None,
align: PageTableConsts::BASE_PAGE_SIZE,
align: PagingConsts::BASE_PAGE_SIZE,
perm: VmPerm::empty(),
can_overwrite: false,
}
@ -340,7 +340,7 @@ impl TryFrom<u64> for VmPerm {
/// The iterator for querying over the VM space without modifying it.
pub struct VmQueryIter<'a> {
cursor: Cursor<'a, UserMode, PageTableEntry, PageTableConsts>,
cursor: Cursor<'a, UserMode, PageTableEntry, PagingConsts>,
}
pub enum VmQueryResult {