Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-25 02:13:24 +00:00)
Regulate the mapping tracking status of page tables
Commit 5bdf85b5f0 (parent 909fb23f8c), committed by Tate, Hongliang Tian
@@ -13,13 +13,13 @@
 //!
 //! ```text
 //! +-+ <- the highest used address (0xffff_ffff_ffff_0000)
-//! | | For the kernel code, 1 GiB. Mapped frames are untracked.
+//! | | For the kernel code, 1 GiB. Mapped frames are tracked.
 //! +-+ <- 0xffff_ffff_8000_0000
 //! | |
 //! | | Unused hole.
 //! +-+ <- 0xffff_ff00_0000_0000
 //! | | For frame metadata, 1 TiB.
-//! | | Mapped frames are untracked.
+//! | | Mapped frames are tracked with handles.
 //! +-+ <- 0xffff_fe00_0000_0000
 //! | | For vm alloc/io mappings, 1 TiB.
 //! | | Mapped frames are tracked with handles.
@@ -104,6 +104,13 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize {
     pa + LINEAR_MAPPING_BASE_VADDR
 }
 
+/// Returns whether the given address should be mapped as tracked.
+///
+/// About what is tracked mapping, see [`crate::mm::page::meta::MapTrackingStatus`].
+pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool {
+    !LINEAR_MAPPING_VADDR_RANGE.contains(&addr)
+}
+
 /// The kernel page table instance.
 ///
 /// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
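The new `should_map_as_tracked` above decides tracking purely by virtual-address range: everything outside the linear mapping of physical memory is mapped with tracked frames. Below is a minimal, self-contained sketch of that range test; the range bounds and the `main` harness are illustrative assumptions, not the crate's actual constants.

```rust
// Standalone sketch (not the crate's code): classify a kernel virtual address
// as "tracked" purely by range, the way the new predicate does.
use core::ops::Range;

// Assumed bounds of the linear (physical-memory) mapping, for illustration only.
const LINEAR_MAPPING_BASE_VADDR: usize = 0xffff_8000_0000_0000;
const LINEAR_MAPPING_END_VADDR: usize = 0xffff_d000_0000_0000;

const LINEAR_MAPPING_VADDR_RANGE: Range<usize> =
    LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_END_VADDR;

/// Mirrors the shape of the predicate: everything outside the linear mapping is tracked.
fn should_map_as_tracked(addr: usize) -> bool {
    !LINEAR_MAPPING_VADDR_RANGE.contains(&addr)
}

fn main() {
    // An address inside the linear mapping: untracked (plain physical frames).
    assert!(!should_map_as_tracked(0xffff_8000_1234_5000));
    // An address in the kernel code / vm-alloc area: tracked (frames carry handles).
    assert!(should_map_as_tracked(0xffff_ffff_8765_4000));
    println!("range-based tracking decision works");
}
```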
@@ -180,29 +180,50 @@ impl Sealed for FrameMeta {}
 /// Make sure the the generic parameters don't effect the memory layout.
 #[derive(Debug)]
 #[repr(C)]
-pub struct PageTablePageMeta<
+pub(in crate::mm) struct PageTablePageMeta<
     E: PageTableEntryTrait = PageTableEntry,
     C: PagingConstsTrait = PagingConsts,
 > where
     [(); C::NR_LEVELS as usize]:,
 {
-    pub level: PagingLevel,
-    /// The lock for the page table page.
-    pub lock: AtomicU8,
     /// The number of valid PTEs. It is mutable if the lock is held.
     pub nr_children: UnsafeCell<u16>,
+    /// The level of the page table page. A page table page cannot be
+    /// referenced by page tables of different levels.
+    pub level: PagingLevel,
+    /// Whether the pages mapped by the node is tracked.
+    pub is_tracked: MapTrackingStatus,
+    /// The lock for the page table page.
+    pub lock: AtomicU8,
     _phantom: core::marker::PhantomData<(E, C)>,
 }
 
+/// Describe if the physical address recorded in this page table refers to a
+/// page tracked by metadata.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[repr(u8)]
+pub(in crate::mm) enum MapTrackingStatus {
+    /// The page table node cannot contain references to any pages. It can only
+    /// contain references to child page table nodes.
+    NotApplicable,
+    /// The mapped pages are not tracked by metadata. If any child page table
+    /// nodes exist, they should also be tracked.
+    Untracked,
+    /// The mapped pages are tracked by metadata. If any child page table nodes
+    /// exist, they should also be tracked.
+    Tracked,
+}
+
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTablePageMeta<E, C>
 where
     [(); C::NR_LEVELS as usize]:,
 {
-    pub fn new_locked(level: PagingLevel) -> Self {
+    pub fn new_locked(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
         Self {
-            level,
-            lock: AtomicU8::new(1),
             nr_children: UnsafeCell::new(0),
+            level,
+            is_tracked,
+            lock: AtomicU8::new(1),
             _phantom: PhantomData,
         }
     }
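The `MapTrackingStatus` recorded in `PageTablePageMeta` is what later lets a raw PTE be interpreted correctly: the meaning of a leaf entry's physical address comes from the owning node, not from the entry itself. Below is a standalone toy model of that idea, using simplified stand-in types rather than ostd's real metadata.

```rust
// Standalone model (not ostd's API): a node-level tag telling how to interpret
// the physical addresses stored in that node's leaf entries.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
enum MapTrackingStatus {
    /// The node only holds child page tables, never leaf pages.
    NotApplicable,
    /// Leaf entries point at plain physical addresses (no metadata handles).
    Untracked,
    /// Leaf entries point at pages whose reference counts are tracked.
    Tracked,
}

/// A toy stand-in for the page-table-page metadata added by this commit.
struct NodeMeta {
    level: u8,
    is_tracked: MapTrackingStatus,
}

/// What a leaf entry means depends on the owning node, not on the entry itself.
fn describe_leaf(meta: &NodeMeta, paddr: usize) -> String {
    match meta.is_tracked {
        MapTrackingStatus::Tracked => format!("handle-backed page at {paddr:#x}"),
        MapTrackingStatus::Untracked => format!("raw physical address {paddr:#x}"),
        MapTrackingStatus::NotApplicable => panic!("level-{} node holds no leaves", meta.level),
    }
}

fn main() {
    let linear = NodeMeta { level: 2, is_tracked: MapTrackingStatus::Untracked };
    let user = NodeMeta { level: 1, is_tracked: MapTrackingStatus::Tracked };
    println!("{}", describe_leaf(&linear, 0x8000_0000));
    println!("{}", describe_leaf(&user, 0x1f_f000));
}
```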
@@ -75,7 +75,11 @@ use super::{
 };
 use crate::{
     mm::{
-        page::{meta::PageTablePageMeta, DynPage, Page},
+        kspace::should_map_as_tracked,
+        page::{
+            meta::{MapTrackingStatus, PageTablePageMeta},
+            DynPage, Page,
+        },
         Paddr, PageProperty, Vaddr,
     },
     task::{disable_preempt, DisabledPreemptGuard},
@@ -229,7 +233,8 @@ where
                 Child::Page(page, prop) => {
                     return Ok(PageTableItem::Mapped { va, page, prop });
                 }
-                Child::Untracked(pa, prop) => {
+                Child::Untracked(pa, plevel, prop) => {
+                    debug_assert_eq!(plevel, level);
                     return Ok(PageTableItem::MappedUntracked {
                         va,
                         pa,
@@ -323,6 +328,12 @@ where
         self.guards[(self.level - 1) as usize] = Some(nxt_lvl_ptn_locked);
     }
 
+    fn should_map_as_tracked(&self) -> bool {
+        (TypeId::of::<M>() == TypeId::of::<KernelMode>()
+            || TypeId::of::<M>() == TypeId::of::<UserMode>())
+            && should_map_as_tracked(self.va)
+    }
+
     fn cur_node(&self) -> &PageTableNode<E, C> {
         self.guards[(self.level - 1) as usize].as_ref().unwrap()
     }
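The cursor's `should_map_as_tracked` combines a paging-mode check (done with `TypeId` on the zero-sized mode marker) with the address-range check from `kspace`. A small self-contained sketch of that pattern follows, with stand-in marker types; it illustrates the technique and is not ostd's code.

```rust
// Standalone sketch of dispatching on a zero-sized paging-mode marker via TypeId.
// The marker types below are stand-ins, not ostd's.
use std::any::TypeId;

struct KernelMode;
struct UserMode;
struct DeviceMode; // e.g. an IOMMU page table: never tracked

fn is_cpu_page_table<M: 'static>() -> bool {
    TypeId::of::<M>() == TypeId::of::<KernelMode>() || TypeId::of::<M>() == TypeId::of::<UserMode>()
}

/// Tracking applies only to CPU page tables, and only outside the linear mapping.
fn should_map_as_tracked<M: 'static>(va_in_linear_mapping: bool) -> bool {
    is_cpu_page_table::<M>() && !va_in_linear_mapping
}

fn main() {
    assert!(should_map_as_tracked::<UserMode>(false));
    assert!(!should_map_as_tracked::<KernelMode>(true)); // linear mapping stays untracked
    assert!(!should_map_as_tracked::<DeviceMode>(false)); // IOMMU tables are untracked
}
```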
@@ -332,30 +343,12 @@ where
     }
 
     fn cur_child(&self) -> Child<E, C> {
-        self.cur_node()
-            .child(self.cur_idx(), self.in_tracked_range())
+        self.cur_node().child(self.cur_idx())
     }
 
     fn read_cur_pte(&self) -> E {
         self.cur_node().read_pte(self.cur_idx())
     }
-
-    /// Tells if the current virtual range must contain untracked mappings.
-    ///
-    /// _Tracked mappings_ means that the mapped physical addresses (in PTEs) points to pages
-    /// tracked by the metadata system. _Tracked mappings_ must be created with page handles.
-    /// While _untracked mappings_ solely maps to plain physical addresses.
-    ///
-    /// In the kernel mode, this is aligned with the definition in [`crate::mm::kspace`].
-    /// Only linear mappings in the kernel should be considered as untracked mappings.
-    ///
-    /// All mappings in the user mode are tracked. And all mappings in the IOMMU
-    /// page table are untracked.
-    fn in_tracked_range(&self) -> bool {
-        TypeId::of::<M>() == TypeId::of::<UserMode>()
-            || TypeId::of::<M>() == TypeId::of::<KernelMode>()
-                && !crate::mm::kspace::LINEAR_MAPPING_VADDR_RANGE.contains(&self.va)
-    }
 }
 
 impl<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait> Iterator
@@ -445,13 +438,14 @@ where
     pub unsafe fn map(&mut self, page: DynPage, prop: PageProperty) -> Option<DynPage> {
         let end = self.0.va + page.size();
         assert!(end <= self.0.barrier_va.end);
-        debug_assert!(self.0.in_tracked_range());
 
         // Go down if not applicable.
         while self.0.level > C::HIGHEST_TRANSLATION_LEVEL
             || self.0.va % page_size::<C>(self.0.level) != 0
             || self.0.va + page_size::<C>(self.0.level) > end
         {
+            debug_assert!(self.0.should_map_as_tracked());
+
             let pte = self.0.read_cur_pte();
             if pte.is_present() && !pte.is_last(self.0.level) {
                 self.0.level_down();
@@ -468,7 +462,7 @@ where
         let idx = self.0.cur_idx();
         let old = self
             .cur_node_mut()
-            .replace_child(idx, Child::Page(page, prop), true);
+            .replace_child(idx, Child::Page(page, prop));
         self.0.move_forward();
 
         match old {
@@ -477,7 +471,7 @@ where
             Child::PageTable(_) => {
                 todo!("Dropping page table nodes while mapping requires TLB flush")
             }
-            Child::Untracked(_, _) => panic!("Mapping a tracked page in an untracked range"),
+            Child::Untracked(_, _, _) => panic!("Mapping a tracked page in an untracked range"),
         }
     }
 
@@ -537,11 +531,12 @@ where
             }
 
             // Map the current page.
-            debug_assert!(!self.0.in_tracked_range());
+            debug_assert!(!self.0.should_map_as_tracked());
             let idx = self.0.cur_idx();
+            let level = self.0.level;
             let _ = self
                 .cur_node_mut()
-                .replace_child(idx, Child::Untracked(pa, prop), false);
+                .replace_child(idx, Child::Untracked(pa, level, prop));
 
-            let level = self.0.level;
             pa += page_size::<C>(level);
@@ -581,7 +576,6 @@ where
 
         while self.0.va < end {
             let cur_pte = self.0.read_cur_pte();
-            let is_tracked = self.0.in_tracked_range();
 
             // Skip if it is already absent.
             if !cur_pte.is_present() {
@@ -596,15 +590,15 @@ where
             if self.0.va % page_size::<C>(self.0.level) != 0
                 || self.0.va + page_size::<C>(self.0.level) > end
             {
-                if !is_tracked {
+                if cur_pte.is_last(self.0.level) {
+                    if !self.0.should_map_as_tracked() {
                         // Level down if we are removing part of a huge untracked page.
                         self.level_down_split();
                         continue;
-                }
-
-                if cur_pte.is_last(self.0.level) {
+                    } else {
                         panic!("removing part of a huge page");
+                    }
                 }
 
                 // Level down if the current PTE points to a page table and we cannot
                 // unmap this page table node entirely.
@@ -626,11 +620,8 @@ where
 
             // Unmap the current page and return it.
             let idx = self.0.cur_idx();
-            let ret = self
-                .cur_node_mut()
-                .replace_child(idx, Child::None, is_tracked);
+            let ret = self.cur_node_mut().replace_child(idx, Child::None);
             let ret_page_va = self.0.va;
-            let ret_page_size = page_size::<C>(self.0.level);
 
             self.0.move_forward();
 
@@ -640,12 +631,15 @@ where
                     page,
                     prop,
                 },
-                Child::Untracked(pa, prop) => PageTableItem::MappedUntracked {
+                Child::Untracked(pa, level, prop) => {
+                    debug_assert_eq!(level, self.0.level);
+                    PageTableItem::MappedUntracked {
                         va: ret_page_va,
                         pa,
-                        len: ret_page_size,
+                        len: page_size::<C>(level),
                         prop,
-                },
+                    }
+                }
                 Child::PageTable(node) => {
                     let node = ManuallyDrop::new(node);
                     let page = Page::<PageTablePageMeta<E, C>>::from_raw(node.paddr());
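`Child::Untracked` now carries the paging level of the mapping, so the unmap path can recompute the mapped length as `page_size::<C>(level)` instead of caching `ret_page_size`. A quick numeric sketch of that level-to-size arithmetic follows, assuming 4 KiB base pages and 512-entry tables (typical for x86-64); the constants are illustrative, not read from the crate.

```rust
// Quick numeric check of the page_size(level) arithmetic the new code relies on.
const BASE_PAGE_SIZE: usize = 4096;
const NR_ENTRIES_PER_TABLE: usize = 512;

fn page_size(level: u32) -> usize {
    BASE_PAGE_SIZE * NR_ENTRIES_PER_TABLE.pow(level - 1)
}

fn main() {
    assert_eq!(page_size(1), 4 << 10); // 4 KiB
    assert_eq!(page_size(2), 2 << 20); // 2 MiB
    assert_eq!(page_size(3), 1 << 30); // 1 GiB
    // A level-2 untracked child therefore reports a 2 MiB mapped length.
    println!("level 2 maps {} bytes", page_size(2));
}
```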
@@ -722,7 +716,7 @@ where
         if self.0.va % page_size::<C>(self.0.level) != 0
             || self.0.va + page_size::<C>(self.0.level) > end
         {
-            if self.0.in_tracked_range() {
+            if self.0.should_map_as_tracked() {
                 panic!("protecting part of a huge page");
             } else {
                 self.level_down_split();
@@ -818,7 +812,7 @@ where
         src.cur_node_mut().protect(idx, pte_prop);
 
         // Do copy.
-        let child = src.cur_node_mut().child(idx, true);
+        let child = src.cur_node_mut().child(idx);
         let Child::<E, C>::Page(page, prop) = child else {
             panic!("Unexpected child for source mapping: {:#?}", child);
         };
@@ -839,14 +833,18 @@ where
     /// This method will create a new child page table node and go down to it.
     fn level_down_create(&mut self) {
         debug_assert!(self.0.level > 1);
-        let new_node = PageTableNode::<E, C>::alloc(self.0.level - 1);
-        let idx = self.0.cur_idx();
-        let is_tracked = self.0.in_tracked_range();
-        let old = self.cur_node_mut().replace_child(
-            idx,
-            Child::PageTable(new_node.clone_raw()),
-            is_tracked,
+        let new_node = PageTableNode::<E, C>::alloc(
+            self.0.level - 1,
+            if self.0.should_map_as_tracked() {
+                MapTrackingStatus::Tracked
+            } else {
+                MapTrackingStatus::Untracked
+            },
         );
+        let idx = self.0.cur_idx();
+        let old = self
+            .cur_node_mut()
+            .replace_child(idx, Child::PageTable(new_node.clone_raw()));
         debug_assert!(old.is_none());
         self.0.level -= 1;
         self.0.guards[(self.0.level - 1) as usize] = Some(new_node);
@@ -857,7 +855,7 @@ where
     /// This method will split the huge page and go down to the next level.
     fn level_down_split(&mut self) {
         debug_assert!(self.0.level > 1);
-        debug_assert!(!self.0.in_tracked_range());
+        debug_assert!(!self.0.should_map_as_tracked());
 
         let idx = self.0.cur_idx();
         self.cur_node_mut().split_untracked_huge(idx);
@@ -3,8 +3,8 @@
 use core::{fmt::Debug, marker::PhantomData, ops::Range};
 
 use super::{
-    nr_subpage_per_huge, page_prop::PageProperty, page_size, Paddr, PagingConstsTrait, PagingLevel,
-    Vaddr,
+    nr_subpage_per_huge, page::meta::MapTrackingStatus, page_prop::PageProperty, page_size, Paddr,
+    PagingConstsTrait, PagingLevel, Vaddr,
 };
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
@@ -101,15 +101,16 @@ impl PageTable<KernelMode> {
     /// duplicate the kernel page table with all the kernel mappings shared.
     pub fn create_user_page_table(&self) -> PageTable<UserMode> {
         let root_node = self.root.clone_shallow().lock();
-        let mut new_node = PageTableNode::alloc(PagingConsts::NR_LEVELS);
+        let mut new_node =
+            PageTableNode::alloc(PagingConsts::NR_LEVELS, MapTrackingStatus::NotApplicable);
 
         // Make a shallow copy of the root node in the kernel space range.
         // The user space range is not copied.
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
         for i in NR_PTES_PER_NODE / 2..NR_PTES_PER_NODE {
-            let child = root_node.child(i, /* meaningless */ true);
+            let child = root_node.child(i);
             if !child.is_none() {
-                let _ = new_node.replace_child(i, child, /* meaningless */ true);
+                let _ = new_node.replace_child(i, child);
             }
         }
 
@@ -137,12 +138,16 @@ impl PageTable<KernelMode> {
         let mut root_node = self.root.clone_shallow().lock();
         for i in start..end {
             if !root_node.read_pte(i).is_present() {
-                let node = PageTableNode::alloc(PagingConsts::NR_LEVELS - 1);
-                let _ = root_node.replace_child(
-                    i,
-                    Child::PageTable(node.into_raw()),
-                    i < NR_PTES_PER_NODE * 3 / 4,
-                );
+                let nxt_level = PagingConsts::NR_LEVELS - 1;
+                let is_tracked = if super::kspace::should_map_as_tracked(
+                    i * page_size::<PagingConsts>(nxt_level),
+                ) {
+                    MapTrackingStatus::Tracked
+                } else {
+                    MapTrackingStatus::Untracked
+                };
+                let node = PageTableNode::alloc(nxt_level, is_tracked);
+                let _ = root_node.replace_child(i, Child::PageTable(node.into_raw()));
             }
         }
     }
@@ -175,7 +180,8 @@ where
     /// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
     pub fn empty() -> Self {
         PageTable {
-            root: PageTableNode::<E, C>::alloc(C::NR_LEVELS).into_raw(),
+            root: PageTableNode::<E, C>::alloc(C::NR_LEVELS, MapTrackingStatus::NotApplicable)
+                .into_raw(),
             _phantom: PhantomData,
         }
     }
ostd/src/mm/page_table/node/child.rs (new file, 133 lines)
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: MPL-2.0
+
+//! This module specifies the type of the children of a page table node.
+
+use core::{mem::ManuallyDrop, panic};
+
+use super::{PageTableEntryTrait, RawPageTableNode};
+use crate::{
+    arch::mm::{PageTableEntry, PagingConsts},
+    mm::{
+        page::{
+            meta::{MapTrackingStatus, PageTablePageMeta},
+            DynPage, Page,
+        },
+        page_prop::PageProperty,
+        Paddr, PagingConstsTrait, PagingLevel,
+    },
+};
+
+/// A child of a page table node.
+#[derive(Debug)]
+pub(in crate::mm) enum Child<
+    E: PageTableEntryTrait = PageTableEntry,
+    C: PagingConstsTrait = PagingConsts,
+> where
+    [(); C::NR_LEVELS as usize]:,
+{
+    PageTable(RawPageTableNode<E, C>),
+    Page(DynPage, PageProperty),
+    /// Pages not tracked by handles.
+    Untracked(Paddr, PagingLevel, PageProperty),
+    None,
+}
+
+impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<E, C>
+where
+    [(); C::NR_LEVELS as usize]:,
+{
+    /// Returns whether the child does not map to anything.
+    pub(in crate::mm) fn is_none(&self) -> bool {
+        matches!(self, Child::None)
+    }
+
+    /// Converts a child into a owning PTE.
+    ///
+    /// By conversion it loses information about whether the page is tracked
+    /// or not. Also it loses the level information. However, the returned PTE
+    /// takes the ownership (reference count) of the child.
+    ///
+    /// Usually this is for recording the PTE into a page table node. When the
+    /// child is needed again by reading the PTE of a page table node, extra
+    /// information should be provided using the [`Child::from_pte`] method.
+    pub(super) fn into_pte(self) -> E {
+        match self {
+            Child::PageTable(pt) => {
+                let pt = ManuallyDrop::new(pt);
+                E::new_pt(pt.paddr())
+            }
+            Child::Page(page, prop) => {
+                let level = page.level();
+                E::new_page(page.into_raw(), level, prop)
+            }
+            Child::Untracked(pa, level, prop) => E::new_page(pa, level, prop),
+            Child::None => E::new_absent(),
+        }
+    }
+
+    /// Converts a PTE back to a child.
+    ///
+    /// # Safety
+    ///
+    /// The provided PTE must be originated from [`Child::into_pte`]. And the
+    /// provided information (level and tracking status) must align with the
+    /// lost information during the conversion.
+    ///
+    /// This method should be only used no more than once for a PTE that has
+    /// been converted from a child using the [`Child::into_pte`] method.
+    pub(super) unsafe fn from_pte(
+        pte: E,
+        level: PagingLevel,
+        is_tracked: MapTrackingStatus,
+    ) -> Self {
+        if !pte.is_present() {
+            Child::None
+        } else {
+            let paddr = pte.paddr();
+            if !pte.is_last(level) {
+                Child::PageTable(RawPageTableNode::from_paddr(paddr))
+            } else {
+                match is_tracked {
+                    MapTrackingStatus::Tracked => Child::Page(DynPage::from_raw(paddr), pte.prop()),
+                    MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
+                    MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
+                }
+            }
+        }
+    }
+
+    /// Gains an extra owning reference to the child.
+    ///
+    /// # Safety
+    ///
+    /// The provided PTE must be originated from [`Child::into_pte`]. And the
+    /// provided information (level and tracking status) must align with the
+    /// lost information during the conversion.
+    ///
+    /// This method must not be used with a PTE that has been restored to a
+    /// child using the [`Child::from_pte`] method.
+    pub(super) unsafe fn clone_from_pte(
+        pte: &E,
+        level: PagingLevel,
+        is_tracked: MapTrackingStatus,
+    ) -> Self {
+        if !pte.is_present() {
+            Child::None
+        } else {
+            let paddr = pte.paddr();
+            if !pte.is_last(level) {
+                Page::<PageTablePageMeta<E, C>>::inc_ref_count(paddr);
+                Child::PageTable(RawPageTableNode::from_paddr(paddr))
+            } else {
+                match is_tracked {
+                    MapTrackingStatus::Tracked => {
+                        DynPage::inc_ref_count(paddr);
+                        Child::Page(DynPage::from_raw(paddr), pte.prop())
+                    }
+                    MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
+                    MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
+                }
+            }
+        }
+    }
+}
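The new `child.rs` centers on one contract: `into_pte` erases a child's level and tracking status when it is stored, and `from_pte`/`clone_from_pte` must be given that information back by the owning node, exactly once per stored child. The standalone model below illustrates the round trip with toy types; it is not ostd's PTE or page-handle API.

```rust
// Standalone model of the into_pte/from_pte contract: storing a child erases its
// level and tracking status, so whoever reads the entry back must re-supply both
// from the owning node. Types here are toys, not ostd's.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Tracking {
    Tracked,
    Untracked,
}

#[derive(Debug, PartialEq)]
enum Child {
    PageTable(usize),     // paddr of a child table
    Page(usize),          // paddr of a tracked page
    Untracked(usize, u8), // paddr + level of a plain mapping
    None,
}

/// A toy "PTE": just a paddr and a leaf/non-leaf bit; level and tracking are gone.
#[derive(Clone, Copy)]
struct Pte {
    paddr: usize,
    is_leaf: bool,
    present: bool,
}

fn into_pte(child: Child) -> Pte {
    match child {
        Child::PageTable(pa) => Pte { paddr: pa, is_leaf: false, present: true },
        Child::Page(pa) => Pte { paddr: pa, is_leaf: true, present: true },
        Child::Untracked(pa, _level) => Pte { paddr: pa, is_leaf: true, present: true },
        Child::None => Pte { paddr: 0, is_leaf: false, present: false },
    }
}

/// The lost information comes back from the node that owns the entry.
fn from_pte(pte: Pte, node_level: u8, node_tracking: Tracking) -> Child {
    if !pte.present {
        Child::None
    } else if !pte.is_leaf {
        Child::PageTable(pte.paddr)
    } else {
        match node_tracking {
            Tracking::Tracked => Child::Page(pte.paddr),
            Tracking::Untracked => Child::Untracked(pte.paddr, node_level),
        }
    }
}

fn main() {
    let stored = into_pte(Child::Untracked(0x8000_0000, 2));
    // Reading it back from a level-2, untracked node restores the full child.
    assert_eq!(from_pte(stored, 2, Tracking::Untracked), Child::Untracked(0x8000_0000, 2));
}
```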
@@ -25,8 +25,12 @@
 //! the initialization of the entity that the PTE points to. This is taken care in this module.
 //!
 
+mod child;
+
 use core::{fmt, marker::PhantomData, mem::ManuallyDrop, panic, sync::atomic::Ordering};
 
+pub(in crate::mm) use child::Child;
+
 use super::{nr_subpage_per_huge, page_size, PageTableEntryTrait};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
@@ -34,7 +38,7 @@ use crate::{
         paddr_to_vaddr,
         page::{
             self,
-            meta::{PageMeta, PageTablePageMeta, PageUsage},
+            meta::{MapTrackingStatus, PageMeta, PageTablePageMeta, PageUsage},
             DynPage, Page,
         },
         page_prop::PageProperty,
@@ -155,6 +159,20 @@ where
             Page::<PageTablePageMeta<E, C>>::inc_ref_count(self.paddr());
         }
     }
+
+    /// Restore the handle to a page table node from a physical address.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the physical address is valid and points to
+    /// a forgotten page table node. A forgotten page table node can only be
+    /// restored once.
+    unsafe fn from_paddr(paddr: Paddr) -> Self {
+        Self {
+            raw: paddr,
+            _phantom: PhantomData,
+        }
+    }
 }
 
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableNode<E, C>
@@ -200,28 +218,6 @@ where
     }
 }
 
-/// A child of a page table node.
-#[derive(Debug)]
-pub(super) enum Child<E: PageTableEntryTrait = PageTableEntry, C: PagingConstsTrait = PagingConsts>
-where
-    [(); C::NR_LEVELS as usize]:,
-{
-    PageTable(RawPageTableNode<E, C>),
-    Page(DynPage, PageProperty),
-    /// Pages not tracked by handles.
-    Untracked(Paddr, PageProperty),
-    None,
-}
-
-impl<E: PageTableEntryTrait, C: PagingConstsTrait> Child<E, C>
-where
-    [(); C::NR_LEVELS as usize]:,
-{
-    pub(super) fn is_none(&self) -> bool {
-        matches!(self, Child::None)
-    }
-}
-
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
 where
     [(); C::NR_LEVELS as usize]:,
@@ -231,8 +227,8 @@ where
     /// This function returns an owning handle. The newly created handle does not
     /// set the lock bit for performance as it is exclusive and unlocking is an
     /// extra unnecessary expensive operation.
-    pub(super) fn alloc(level: PagingLevel) -> Self {
-        let meta = PageTablePageMeta::new_locked(level);
+    pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
+        let meta = PageTablePageMeta::new_locked(level, is_tracked);
         let page = page::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
 
         // Zero out the page table node.
@@ -249,6 +245,10 @@ where
         self.meta().level
     }
 
+    pub fn is_tracked(&self) -> MapTrackingStatus {
+        self.meta().is_tracked
+    }
+
     /// Converts the handle into a raw handle to be stored in a PTE or CPU.
     pub(super) fn into_raw(self) -> RawPageTableNode<E, C> {
         let this = ManuallyDrop::new(self);
@@ -274,102 +274,52 @@ where
     }
 
     /// Gets an extra reference of the child at the given index.
-    pub(super) fn child(&self, idx: usize, in_tracked_range: bool) -> Child<E, C> {
+    pub(super) fn child(&self, idx: usize) -> Child<E, C> {
         debug_assert!(idx < nr_subpage_per_huge::<C>());
 
         let pte = self.read_pte(idx);
-        if !pte.is_present() {
-            Child::None
-        } else {
-            let paddr = pte.paddr();
-            if !pte.is_last(self.level()) {
-                // SAFETY: We have a reference count to the page and can safely increase the reference
-                // count by one more.
-                unsafe {
-                    Page::<PageTablePageMeta<E, C>>::inc_ref_count(paddr);
-                }
-                Child::PageTable(RawPageTableNode {
-                    raw: paddr,
-                    _phantom: PhantomData,
-                })
-            } else if in_tracked_range {
-                // SAFETY: We have a reference count to the page and can safely
-                // increase the reference count by one more.
-                unsafe {
-                    DynPage::inc_ref_count(paddr);
-                }
-                // SAFETY: The physical address of the PTE points to a forgotten
-                // page. It is reclaimed only once.
-                Child::Page(unsafe { DynPage::from_raw(paddr) }, pte.prop())
-            } else {
-                Child::Untracked(paddr, pte.prop())
-            }
-        }
+
+        // SAFETY: The PTE is read from this page table node so the information
+        // recorded in this page table is correct.
+        unsafe { Child::clone_from_pte(&pte, self.level(), self.is_tracked()) }
     }
 
     /// Replace the child at the given index with a new child.
     ///
-    /// The old child is returned.
-    pub(super) fn replace_child(
-        &mut self,
-        idx: usize,
-        new_child: Child<E, C>,
-        in_tracked_range: bool,
-    ) -> Child<E, C> {
-        debug_assert!(idx < nr_subpage_per_huge::<C>());
+    /// The old child is returned. The new child must match the level of the page
+    /// table node and the tracking status of the page table node.
+    pub(super) fn replace_child(&mut self, idx: usize, new_child: Child<E, C>) -> Child<E, C> {
+        // It should be ensured by the cursor.
+        #[cfg(debug_assertions)]
+        match &new_child {
+            Child::PageTable(_) => {
+                debug_assert!(self.level() > 1);
+            }
+            Child::Page(p, _) => {
+                debug_assert!(self.level() == p.level());
+                debug_assert!(self.is_tracked() == MapTrackingStatus::Tracked);
+            }
+            Child::Untracked(_, level, _) => {
+                debug_assert!(self.level() == *level);
+                debug_assert!(self.is_tracked() == MapTrackingStatus::Untracked);
+            }
+            Child::None => {}
+        }
 
-        let old_pte = self.read_pte(idx);
+        let pte = self.read_pte(idx);
+        // SAFETY: The PTE is read from this page table node so the information
+        // provided is correct. The PTE is not restored twice.
+        let old_child = unsafe { Child::from_pte(pte, self.level(), self.is_tracked()) };
 
-        let new_child_is_none = match new_child {
-            Child::None => {
-                if old_pte.is_present() {
-                    self.write_pte(idx, E::new_absent());
-                }
-                true
-            }
-            Child::PageTable(pt) => {
-                let pt = ManuallyDrop::new(pt);
-                let new_pte = E::new_pt(pt.paddr());
-                self.write_pte(idx, new_pte);
-                false
-            }
-            Child::Page(page, prop) => {
-                debug_assert!(in_tracked_range);
-                let new_pte = E::new_page(page.into_raw(), self.level(), prop);
-                self.write_pte(idx, new_pte);
-                false
-            }
-            Child::Untracked(pa, prop) => {
-                debug_assert!(!in_tracked_range);
-                let new_pte = E::new_page(pa, self.level(), prop);
-                self.write_pte(idx, new_pte);
-                false
-            }
-        };
-
-        if old_pte.is_present() {
-            if new_child_is_none {
+        if old_child.is_none() && !new_child.is_none() {
+            *self.nr_children_mut() += 1;
+        } else if !old_child.is_none() && new_child.is_none() {
             *self.nr_children_mut() -= 1;
         }
-            let paddr = old_pte.paddr();
-            if !old_pte.is_last(self.level()) {
-                Child::PageTable(RawPageTableNode {
-                    raw: paddr,
-                    _phantom: PhantomData,
-                })
-            } else if in_tracked_range {
-                // SAFETY: The physical address of the old PTE points to a
-                // forgotten page. It is reclaimed only once.
-                Child::Page(unsafe { DynPage::from_raw(paddr) }, old_pte.prop())
-            } else {
-                Child::Untracked(paddr, old_pte.prop())
-            }
-        } else {
-            if !new_child_is_none {
-                *self.nr_children_mut() += 1;
-            }
-            Child::None
-        }
+
+        self.write_pte(idx, new_child.into_pte());
+
+        old_child
     }
 
     /// Splits the untracked huge page mapped at `idx` to smaller pages.
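With the old child now reconstructed from the PTE, `replace_child` keeps `nr_children` consistent by comparing the none-ness of the old and new children rather than threading a `new_child_is_none` flag through a match. The sketch below isolates that bookkeeping rule with an `Option` standing in for a real child; it is an illustration, not the crate's implementation.

```rust
// Standalone sketch of the nr_children bookkeeping rule: the counter moves only
// when an entry toggles between empty and occupied.
fn update_child_count(nr_children: &mut u16, old_is_none: bool, new_is_none: bool) {
    if old_is_none && !new_is_none {
        *nr_children += 1; // an empty slot became occupied
    } else if !old_is_none && new_is_none {
        *nr_children -= 1; // an occupied slot became empty
    }
    // occupied -> occupied and empty -> empty leave the count unchanged
}

fn main() {
    let mut nr_children: u16 = 0;
    let mut slot: Option<u32> = None;

    // Map something into the empty slot.
    let new = Some(7);
    update_child_count(&mut nr_children, slot.is_none(), new.is_none());
    slot = new;
    assert_eq!(nr_children, 1);

    // Replace it with another child: count stays the same.
    let new = Some(9);
    update_child_count(&mut nr_children, slot.is_none(), new.is_none());
    slot = new;
    assert_eq!(nr_children, 1);

    // Unmap it: count drops back to zero.
    let new = None;
    update_child_count(&mut nr_children, slot.is_none(), new.is_none());
    slot = new;
    assert_eq!(nr_children, 0);
    let _ = slot;
}
```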
@@ -378,17 +328,19 @@ where
         debug_assert!(idx < nr_subpage_per_huge::<C>());
         debug_assert!(self.level() > 1);
 
-        let Child::Untracked(pa, prop) = self.child(idx, false) else {
+        let Child::Untracked(pa, level, prop) = self.child(idx) else {
             panic!("`split_untracked_huge` not called on an untracked huge page");
         };
 
-        let mut new_page = PageTableNode::<E, C>::alloc(self.level() - 1);
+        debug_assert_eq!(level, self.level());
+
+        let mut new_page = PageTableNode::<E, C>::alloc(level - 1, MapTrackingStatus::Untracked);
         for i in 0..nr_subpage_per_huge::<C>() {
-            let small_pa = pa + i * page_size::<C>(self.level() - 1);
-            new_page.replace_child(i, Child::Untracked(small_pa, prop), false);
+            let small_pa = pa + i * page_size::<C>(level - 1);
+            new_page.replace_child(i, Child::Untracked(small_pa, level - 1, prop));
         }
 
-        self.replace_child(idx, Child::PageTable(new_page.into_raw()), false);
+        self.replace_child(idx, Child::PageTable(new_page.into_raw()));
     }
 
     /// Protects an already mapped child at a given index.
@@ -467,6 +419,7 @@ where
     fn on_drop(page: &mut Page<Self>) {
         let paddr = page.paddr();
         let level = page.meta().level;
+        let is_tracked = page.meta().is_tracked;
 
         // Drop the children.
         for i in 0..nr_subpage_per_huge::<C>() {
@@ -476,19 +429,21 @@ where
             let pte_ptr = unsafe { (paddr_to_vaddr(paddr) as *const E).add(i) };
             // SAFETY: The pointer is valid and the PTE is plain-old-data.
             let pte = unsafe { pte_ptr.read() };
+
+            // Here if we use directly `Child::from_pte` we would experience a
+            // 50% increase in the overhead of the `drop` function. It seems that
+            // Rust is very conservative about inlining and optimizing dead code
+            // for `unsafe` code. So we manually inline the function here.
             if pte.is_present() {
-                // Just restore the handle and drop the handle.
+                let paddr = pte.paddr();
                 if !pte.is_last(level) {
-                    // This is a page table.
-                    // SAFETY: The physical address must be casted from a handle to a
-                    // page table node.
-                    drop(unsafe { Page::<Self>::from_raw(pte.paddr()) });
-                } else {
-                    // This is a page. You cannot drop a page table node that maps to
-                    // untracked pages. This must be verified.
-                    // SAFETY: The physical address must be casted from a handle to a
-                    // page.
-                    drop(unsafe { DynPage::from_raw(pte.paddr()) });
+                    // SAFETY: The PTE points to a page table node. The ownership
+                    // of the child is transferred to the child then dropped.
+                    drop(unsafe { Page::<Self>::from_raw(paddr) });
+                } else if is_tracked == MapTrackingStatus::Tracked {
+                    // SAFETY: The PTE points to a tracked page. The ownership
+                    // of the child is transferred to the child then dropped.
+                    drop(unsafe { DynPage::from_raw(paddr) });
                 }
             }
         }