Mirror of https://github.com/asterinas/asterinas.git (synced 2025-06-20 23:36:34 +00:00)
Simplify visibility of page table APIs since the module is already pub(crate)
Commit: fd63f8307d
Parent: 668997ab51
Committed by: Tate, Hongliang Tian
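The rationale in the title follows from how Rust computes effective visibility: an item is reachable only through its parent modules, so `pub` items inside a `pub(crate)` module remain confined to the crate. Below is a minimal standalone sketch of that rule (not code from the Asterinas tree); the diff that follows applies the same idea across the page table cursor and `PageTable` APIs.

// Sketch only: `pub` inside a `pub(crate)` module does not widen the crate's API.
pub(crate) mod page_table {
    // Effectively `pub(crate)`: the enclosing module already caps visibility.
    pub struct Handle;

    pub fn new_handle() -> Handle {
        Handle
    }
}

fn main() {
    // Any module inside the crate can use the `pub` items...
    let _handle = page_table::new_handle();
    // ...but a downstream crate cannot even name the `page_table` module,
    // so nothing here becomes part of the public API.
}

Replacing `pub(crate)` with plain `pub` on the items therefore only removes noise; the crate's visible surface is unchanged.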
@@ -76,7 +76,7 @@ use super::{
 use crate::mm::{page::DynPage, Paddr, PageProperty, Vaddr};
 
 #[derive(Clone, Debug)]
-pub(crate) enum PageTableQueryResult {
+pub enum PageTableQueryResult {
     NotMapped {
         va: Vaddr,
         len: usize,
@@ -105,7 +105,7 @@ pub(crate) enum PageTableQueryResult {
 /// simulate the recursion, and adpot a page table locking protocol to
 /// provide concurrency.
 #[derive(Debug)]
-pub(crate) struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
+pub struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
 where
     [(); C::NR_LEVELS as usize]:,
 {
@@ -140,10 +140,7 @@ where
     ///
     /// Note that this function does not ensure exclusive access to the claimed
     /// virtual address range. The accesses using this cursor may block or fail.
-    pub(crate) fn new(
-        pt: &'a PageTable<M, E, C>,
-        va: &Range<Vaddr>,
-    ) -> Result<Self, PageTableError> {
+    pub fn new(pt: &'a PageTable<M, E, C>, va: &Range<Vaddr>) -> Result<Self, PageTableError> {
         if !M::covers(va) {
             return Err(PageTableError::InvalidVaddrRange(va.start, va.end));
         }
@@ -198,7 +195,7 @@ where
     }
 
     /// Gets the information of the current slot.
-    pub(crate) fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
+    pub fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
         if self.va >= self.barrier_va.end {
             return Err(PageTableError::InvalidVaddr(self.va));
         }
@@ -261,7 +258,7 @@ where
     ///
     /// This method panics if the address is out of the range where the cursor is required to operate,
     /// or has bad alignment.
-    pub(crate) fn jump(&mut self, va: Vaddr) {
+    pub fn jump(&mut self, va: Vaddr) {
         assert!(self.barrier_va.contains(&va));
         assert!(va % C::BASE_PAGE_SIZE == 0);
 
@@ -374,7 +371,7 @@ where
 /// Also, it has all the capabilities of a [`Cursor`]. A virtual address range
 /// in a page table can only be accessed by one cursor whether it is mutable or not.
 #[derive(Debug)]
-pub(crate) struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
+pub struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
     Cursor<'a, M, E, C>,
 )
 where
@@ -408,7 +405,7 @@ where
     ///
     /// This method panics if the address is out of the range where the cursor is required to operate,
     /// or has bad alignment.
-    pub(crate) fn jump(&mut self, va: Vaddr) {
+    pub fn jump(&mut self, va: Vaddr) {
         self.0.jump(va)
     }
 
@@ -418,7 +415,7 @@ where
     }
 
     /// Gets the information of the current slot.
-    pub(crate) fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
+    pub fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
         self.0.query()
     }
 
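The `CursorMut` hunks above change only visibility, but they also show the pattern at work: `CursorMut` is a newtype over `Cursor` that forwards the read-only methods. A stripped-down, self-contained sketch of that delegation pattern (the types and fields here are illustrative placeholders, not the crate's real definitions):

// Illustrative stand-ins for the real cursor types.
struct Cursor {
    va: usize,
}

impl Cursor {
    fn jump(&mut self, va: usize) {
        self.va = va;
    }

    fn query(&self) -> usize {
        self.va
    }
}

// The mutable cursor wraps the shared one and forwards its methods, so the
// read-only logic lives in exactly one place.
struct CursorMut(Cursor);

impl CursorMut {
    fn jump(&mut self, va: usize) {
        self.0.jump(va)
    }

    fn query(&self) -> usize {
        self.0.query()
    }
}

fn main() {
    let mut cursor = CursorMut(Cursor { va: 0 });
    cursor.jump(0x1000);
    assert_eq!(cursor.query(), 0x1000);
}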
@@ -435,7 +432,7 @@ where
     ///
     /// The caller should ensure that the virtual range being mapped does
     /// not affect kernel's memory safety.
-    pub(crate) unsafe fn map(&mut self, page: DynPage, prop: PageProperty) {
+    pub unsafe fn map(&mut self, page: DynPage, prop: PageProperty) {
         let end = self.0.va + page.size();
         assert!(end <= self.0.barrier_va.end);
         debug_assert!(self.0.in_tracked_range());
@@ -490,7 +487,7 @@ where
     /// - the range being mapped does not affect kernel's memory safety;
     /// - the physical address to be mapped is valid and safe to use;
     /// - it is allowed to map untracked pages in this virtual address range.
-    pub(crate) unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: PageProperty) {
+    pub unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: PageProperty) {
         let end = self.0.va + pa.len();
         let mut pa = pa.start;
         assert!(end <= self.0.barrier_va.end);
@@ -540,7 +537,7 @@ where
     /// This function will panic if:
     /// - the range to be unmapped is out of the range where the cursor is required to operate;
     /// - the range covers only a part of a page.
-    pub(crate) unsafe fn unmap(&mut self, len: usize) {
+    pub unsafe fn unmap(&mut self, len: usize) {
         let end = self.0.va + len;
         assert!(end <= self.0.barrier_va.end);
         assert!(end % C::BASE_PAGE_SIZE == 0);
@@ -597,7 +594,7 @@ where
     ///
     /// This function will panic if:
     /// - the range to be protected is out of the range where the cursor is required to operate.
-    pub(crate) unsafe fn protect(
+    pub unsafe fn protect(
         &mut self,
         len: usize,
         mut op: impl FnMut(&mut PageProperty),

@@ -14,8 +14,8 @@ use crate::{
 
 mod node;
 use node::*;
-mod cursor;
-pub(crate) use cursor::{Cursor, CursorMut, PageTableQueryResult};
+pub mod cursor;
+pub use cursor::{Cursor, CursorMut, PageTableQueryResult};
 #[cfg(ktest)]
 mod test;
 
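The hunk just above makes `cursor` a `pub` module and re-exports its types with `pub use`. Since the page table module itself sits behind a `pub(crate)` boundary (per the commit title), the re-exports stay crate-internal; they merely give in-crate callers a shorter path. A hedged sketch of that layout (module names are illustrative, and `self::` is used in the re-export to keep the sketch edition-independent):

pub(crate) mod page_table {
    pub mod cursor {
        pub struct Cursor;
        pub struct CursorMut;
    }

    // Re-export so in-crate callers can write `page_table::Cursor` instead of
    // `page_table::cursor::Cursor`; visibility is still capped at the crate.
    pub use self::cursor::{Cursor, CursorMut};
}

fn main() {
    let _short = page_table::Cursor;
    let _long = page_table::cursor::CursorMut;
}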
@@ -78,7 +78,7 @@ const fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: PagingLevel) -> usize
 /// A handle to a page table.
 /// A page table can track the lifetime of the mapped physical pages.
 #[derive(Debug)]
-pub(crate) struct PageTable<
+pub struct PageTable<
     M: PageTableMode,
     E: PageTableEntryTrait = PageTableEntry,
     C: PagingConstsTrait = PagingConsts,
@@ -90,7 +90,7 @@ pub(crate) struct PageTable<
 }
 
 impl PageTable<UserMode> {
-    pub(crate) fn activate(&self) {
+    pub fn activate(&self) {
         // SAFETY: The usermode page table is safe to activate since the kernel
         // mappings are shared.
         unsafe {
@@ -102,7 +102,7 @@ impl PageTable<UserMode> {
     /// new page table.
     ///
     /// TODO: We may consider making the page table itself copy-on-write.
-    pub(crate) fn fork_copy_on_write(&self) -> Self {
+    pub fn fork_copy_on_write(&self) -> Self {
         let mut cursor = self.cursor_mut(&UserMode::VADDR_RANGE).unwrap();
 
         // SAFETY: Protecting the user page table is safe.
@@ -141,7 +141,7 @@ impl PageTable<KernelMode> {
     ///
     /// Then, one can use a user page table to call [`fork_copy_on_write`], creating
     /// other child page tables.
-    pub(crate) fn create_user_page_table(&self) -> PageTable<UserMode> {
+    pub fn create_user_page_table(&self) -> PageTable<UserMode> {
         let root_node = self.root.clone_shallow().lock();
 
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
@@ -159,7 +159,7 @@ impl PageTable<KernelMode> {
     /// The virtual address range should be aligned to the root level page size. Considering
     /// usize overflows, the caller should provide the index range of the root level pages
     /// instead of the virtual address range.
-    pub(crate) fn make_shared_tables(&self, root_index: Range<usize>) {
+    pub fn make_shared_tables(&self, root_index: Range<usize>) {
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
 
         let start = root_index.start;
@@ -184,7 +184,7 @@ where
     [(); C::NR_LEVELS as usize]:,
 {
     /// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
-    pub(crate) fn empty() -> Self {
+    pub fn empty() -> Self {
         PageTable {
             root: PageTableNode::<E, C>::alloc(C::NR_LEVELS).into_raw(),
             _phantom: PhantomData,
@@ -199,11 +199,11 @@ where
     ///
     /// It is dangerous to directly provide the physical address of the root page table to the
     /// hardware since the page table node may be dropped, resulting in UAF.
-    pub(crate) unsafe fn root_paddr(&self) -> Paddr {
+    pub unsafe fn root_paddr(&self) -> Paddr {
         self.root.paddr()
     }
 
-    pub(crate) unsafe fn map(
+    pub unsafe fn map(
         &self,
         vaddr: &Range<Vaddr>,
         paddr: &Range<Paddr>,
@@ -213,12 +213,12 @@ where
         Ok(())
     }
 
-    pub(crate) unsafe fn unmap(&self, vaddr: &Range<Vaddr>) -> Result<(), PageTableError> {
+    pub unsafe fn unmap(&self, vaddr: &Range<Vaddr>) -> Result<(), PageTableError> {
         self.cursor_mut(vaddr)?.unmap(vaddr.len());
         Ok(())
     }
 
-    pub(crate) unsafe fn protect(
+    pub unsafe fn protect(
         &self,
         vaddr: &Range<Vaddr>,
         op: impl FnMut(&mut PageProperty),
@@ -235,16 +235,16 @@ where
     /// cursors concurrently accessing the same virtual address range, just like what
     /// happens for the hardware MMU walk.
     #[cfg(ktest)]
-    pub(crate) fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
+    pub fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
         // SAFETY: The root node is a valid page table node so the address is valid.
         unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
     }
 
     /// Create a new cursor exclusively accessing the virtual address range for mapping.
     ///
-    /// If another cursor is already accessing the range, the new cursor will wait until the
+    /// If another cursor is already accessing the range, the new cursor may wait until the
     /// previous cursor is dropped.
-    pub(crate) fn cursor_mut(
+    pub fn cursor_mut(
         &'a self,
         va: &Range<Vaddr>,
     ) -> Result<CursorMut<'a, M, E, C>, PageTableError> {
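The doc wording above also changes from "will wait" to "may wait": creating a cursor over a range can block while another cursor claims it, and the claim is released when that cursor is dropped. As a loose analogy only (not the crate's actual locking protocol), the behavior resembles an RAII lock guard:

use std::sync::{Mutex, MutexGuard};

// Stand-in for a page table; the single mutex plays the role of whatever
// per-range locks the real protocol takes while a cursor is alive.
struct Table {
    range_lock: Mutex<()>,
}

impl Table {
    // "Creating a cursor" blocks while another guard for the range is held.
    fn cursor(&self) -> MutexGuard<'_, ()> {
        self.range_lock.lock().unwrap()
    }
}

fn main() {
    let table = Table {
        range_lock: Mutex::new(()),
    };
    {
        let _c1 = table.cursor(); // claims the range
    } // released when `_c1` is dropped
    let _c2 = table.cursor(); // acquired only after the previous cursor is gone
}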
@@ -253,19 +253,17 @@ where
 
     /// Create a new cursor exclusively accessing the virtual address range for querying.
     ///
-    /// If another cursor is already accessing the range, the new cursor will wait until the
-    /// previous cursor is dropped.
-    pub(crate) fn cursor(
-        &'a self,
-        va: &Range<Vaddr>,
-    ) -> Result<Cursor<'a, M, E, C>, PageTableError> {
+    /// If another cursor is already accessing the range, the new cursor may wait until the
+    /// previous cursor is dropped. The modification to the mapping by the cursor may also
+    /// block or be overriden by the mapping of another cursor.
+    pub fn cursor(&'a self, va: &Range<Vaddr>) -> Result<Cursor<'a, M, E, C>, PageTableError> {
         Cursor::new(self, va)
     }
 
     /// Create a new reference to the same page table.
     /// The caller must ensure that the kernel page table is not copied.
     /// This is only useful for IOMMU page tables. Think twice before using it in other cases.
-    pub(crate) unsafe fn shallow_copy(&self) -> Self {
+    pub unsafe fn shallow_copy(&self) -> Self {
         PageTable {
             root: self.root.clone_shallow(),
             _phantom: PhantomData,
@@ -340,9 +338,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
 /// The interface for defining architecture-specific page table entries.
 ///
 /// Note that a default PTE shoud be a PTE that points to nothing.
-pub(crate) trait PageTableEntryTrait:
-    Clone + Copy + Debug + Default + Pod + Sized + Sync
-{
+pub trait PageTableEntryTrait: Clone + Copy + Debug + Default + Pod + Sized + Sync {
     /// Create a set of new invalid page table flags that indicates an absent page.
     ///
     /// Note that currently the implementation requires an all zero PTE to be an absent PTE.
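The last hunk folds the supertrait list of `PageTableEntryTrait` onto one line along with the visibility change. Below is a toy, dependency-free illustration of such a bound list together with the "all-zero PTE is absent" convention mentioned in the docs; `Pod` from the real crate is omitted and every name here is a placeholder.

use core::fmt::Debug;

// Placeholder trait mirroring the shape of a PTE interface: plain-data-ish
// bounds plus an "absent by default" convention via `Default`.
pub trait PteLike: Clone + Copy + Debug + Default + Sized + Sync {
    /// In this sketch, the default (all-zero) value counts as an absent entry.
    fn is_absent(&self) -> bool;
}

#[derive(Clone, Copy, Debug, Default)]
struct ToyPte(u64);

impl PteLike for ToyPte {
    fn is_absent(&self) -> bool {
        self.0 == 0
    }
}

fn main() {
    assert!(ToyPte::default().is_absent());
    assert!(!ToyPte(0x1000 | 0b1).is_absent());
}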