diff --git a/ostd/src/mm/page_table/cursor.rs b/ostd/src/mm/page_table/cursor.rs
index 5f8bdde9d..a5b233800 100644
--- a/ostd/src/mm/page_table/cursor.rs
+++ b/ostd/src/mm/page_table/cursor.rs
@@ -76,7 +76,7 @@ use super::{
 use crate::mm::{page::DynPage, Paddr, PageProperty, Vaddr};
 
 #[derive(Clone, Debug)]
-pub(crate) enum PageTableQueryResult {
+pub enum PageTableQueryResult {
     NotMapped {
         va: Vaddr,
         len: usize,
@@ -105,7 +105,7 @@ pub(crate) enum PageTableQueryResult {
 /// simulate the recursion, and adopt a page table locking protocol to
 /// provide concurrency.
 #[derive(Debug)]
-pub(crate) struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
+pub struct Cursor<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>
 where
     [(); C::NR_LEVELS as usize]:,
 {
@@ -140,10 +140,7 @@
     ///
     /// Note that this function does not ensure exclusive access to the claimed
     /// virtual address range. The accesses using this cursor may block or fail.
-    pub(crate) fn new(
-        pt: &'a PageTable<M, E, C>,
-        va: &Range<Vaddr>,
-    ) -> Result<Self, PageTableError> {
+    pub fn new(pt: &'a PageTable<M, E, C>, va: &Range<Vaddr>) -> Result<Self, PageTableError> {
         if !M::covers(va) {
             return Err(PageTableError::InvalidVaddrRange(va.start, va.end));
         }
@@ -198,7 +195,7 @@
     }
 
     /// Gets the information of the current slot.
-    pub(crate) fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
+    pub fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
         if self.va >= self.barrier_va.end {
             return Err(PageTableError::InvalidVaddr(self.va));
         }
@@ -261,7 +258,7 @@
     ///
     /// This method panics if the address is out of the range where the cursor is required to operate,
     /// or has bad alignment.
-    pub(crate) fn jump(&mut self, va: Vaddr) {
+    pub fn jump(&mut self, va: Vaddr) {
         assert!(self.barrier_va.contains(&va));
         assert!(va % C::BASE_PAGE_SIZE == 0);
 
@@ -374,7 +371,7 @@
 /// Also, it has all the capabilities of a [`Cursor`]. A virtual address range
 /// in a page table can only be accessed by one cursor whether it is mutable or not.
 #[derive(Debug)]
-pub(crate) struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
+pub struct CursorMut<'a, M: PageTableMode, E: PageTableEntryTrait, C: PagingConstsTrait>(
     Cursor<'a, M, E, C>,
 )
 where
@@ -408,7 +405,7 @@
     ///
     /// This method panics if the address is out of the range where the cursor is required to operate,
     /// or has bad alignment.
-    pub(crate) fn jump(&mut self, va: Vaddr) {
+    pub fn jump(&mut self, va: Vaddr) {
         self.0.jump(va)
     }
 
@@ -418,7 +415,7 @@
     }
 
     /// Gets the information of the current slot.
-    pub(crate) fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
+    pub fn query(&mut self) -> Result<PageTableQueryResult, PageTableError> {
         self.0.query()
     }
 
@@ -435,7 +432,7 @@
     ///
     /// The caller should ensure that the virtual range being mapped does
     /// not affect kernel's memory safety.
-    pub(crate) unsafe fn map(&mut self, page: DynPage, prop: PageProperty) {
+    pub unsafe fn map(&mut self, page: DynPage, prop: PageProperty) {
         let end = self.0.va + page.size();
         assert!(end <= self.0.barrier_va.end);
         debug_assert!(self.0.in_tracked_range());
@@ -490,7 +487,7 @@
     /// - the range being mapped does not affect kernel's memory safety;
     /// - the physical address to be mapped is valid and safe to use;
     /// - it is allowed to map untracked pages in this virtual address range.
-    pub(crate) unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: PageProperty) {
+    pub unsafe fn map_pa(&mut self, pa: &Range<Paddr>, prop: PageProperty) {
         let end = self.0.va + pa.len();
         let mut pa = pa.start;
         assert!(end <= self.0.barrier_va.end);
@@ -540,7 +537,7 @@
     /// This function will panic if:
     /// - the range to be unmapped is out of the range where the cursor is required to operate;
     /// - the range covers only a part of a page.
-    pub(crate) unsafe fn unmap(&mut self, len: usize) {
+    pub unsafe fn unmap(&mut self, len: usize) {
         let end = self.0.va + len;
         assert!(end <= self.0.barrier_va.end);
         assert!(end % C::BASE_PAGE_SIZE == 0);
@@ -597,7 +594,7 @@
     ///
     /// This function will panic if:
     /// - the range to be protected is out of the range where the cursor is required to operate.
-    pub(crate) unsafe fn protect(
+    pub unsafe fn protect(
         &mut self,
         len: usize,
         mut op: impl FnMut(&mut PageProperty),
diff --git a/ostd/src/mm/page_table/mod.rs b/ostd/src/mm/page_table/mod.rs
index 91e3fa508..118f76de9 100644
--- a/ostd/src/mm/page_table/mod.rs
+++ b/ostd/src/mm/page_table/mod.rs
@@ -14,8 +14,8 @@ use crate::{
 mod node;
 use node::*;
 
-mod cursor;
-pub(crate) use cursor::{Cursor, CursorMut, PageTableQueryResult};
+pub mod cursor;
+pub use cursor::{Cursor, CursorMut, PageTableQueryResult};
 
 #[cfg(ktest)]
 mod test;
@@ -78,7 +78,7 @@ const fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: PagingLevel) -> usize
 /// A handle to a page table.
 /// A page table can track the lifetime of the mapped physical pages.
 #[derive(Debug)]
-pub(crate) struct PageTable<
+pub struct PageTable<
     M: PageTableMode,
     E: PageTableEntryTrait = PageTableEntry,
     C: PagingConstsTrait = PagingConsts,
@@ -90,7 +90,7 @@
 }
 
 impl PageTable<UserMode> {
-    pub(crate) fn activate(&self) {
+    pub fn activate(&self) {
         // SAFETY: The usermode page table is safe to activate since the kernel
         // mappings are shared.
         unsafe {
@@ -102,7 +102,7 @@
     /// new page table.
     ///
     /// TODO: We may consider making the page table itself copy-on-write.
-    pub(crate) fn fork_copy_on_write(&self) -> Self {
+    pub fn fork_copy_on_write(&self) -> Self {
         let mut cursor = self.cursor_mut(&UserMode::VADDR_RANGE).unwrap();
 
         // SAFETY: Protecting the user page table is safe.
@@ -141,7 +141,7 @@
     ///
     /// Then, one can use a user page table to call [`fork_copy_on_write`], creating
     /// other child page tables.
-    pub(crate) fn create_user_page_table(&self) -> PageTable<UserMode> {
+    pub fn create_user_page_table(&self) -> PageTable<UserMode> {
         let root_node = self.root.clone_shallow().lock();
 
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
@@ -159,7 +159,7 @@
     /// The virtual address range should be aligned to the root level page size. Considering
     /// usize overflows, the caller should provide the index range of the root level pages
     /// instead of the virtual address range.
-    pub(crate) fn make_shared_tables(&self, root_index: Range<usize>) {
+    pub fn make_shared_tables(&self, root_index: Range<usize>) {
         const NR_PTES_PER_NODE: usize = nr_subpage_per_huge::<PagingConsts>();
 
         let start = root_index.start;
@@ -184,7 +184,7 @@ where
     [(); C::NR_LEVELS as usize]:,
 {
     /// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
-    pub(crate) fn empty() -> Self {
+    pub fn empty() -> Self {
         PageTable {
             root: PageTableNode::<E, C>::alloc(C::NR_LEVELS).into_raw(),
             _phantom: PhantomData,
         }
@@ -199,11 +199,11 @@ where
     ///
     /// It is dangerous to directly provide the physical address of the root page table to the
     /// hardware since the page table node may be dropped, resulting in UAF.
-    pub(crate) unsafe fn root_paddr(&self) -> Paddr {
+    pub unsafe fn root_paddr(&self) -> Paddr {
         self.root.paddr()
     }
 
-    pub(crate) unsafe fn map(
+    pub unsafe fn map(
         &self,
         vaddr: &Range<Vaddr>,
         paddr: &Range<Paddr>,
@@ -213,12 +213,12 @@
         Ok(())
     }
 
-    pub(crate) unsafe fn unmap(&self, vaddr: &Range<Vaddr>) -> Result<(), PageTableError> {
+    pub unsafe fn unmap(&self, vaddr: &Range<Vaddr>) -> Result<(), PageTableError> {
         self.cursor_mut(vaddr)?.unmap(vaddr.len());
         Ok(())
     }
 
-    pub(crate) unsafe fn protect(
+    pub unsafe fn protect(
         &self,
         vaddr: &Range<Vaddr>,
         op: impl FnMut(&mut PageProperty),
@@ -235,16 +235,16 @@
     /// cursors concurrently accessing the same virtual address range, just like what
     /// happens for the hardware MMU walk.
     #[cfg(ktest)]
-    pub(crate) fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
+    pub fn query(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
         // SAFETY: The root node is a valid page table node so the address is valid.
         unsafe { page_walk::<E, C>(self.root_paddr(), vaddr) }
     }
 
     /// Create a new cursor exclusively accessing the virtual address range for mapping.
     ///
-    /// If another cursor is already accessing the range, the new cursor will wait until the
+    /// If another cursor is already accessing the range, the new cursor may wait until the
     /// previous cursor is dropped.
-    pub(crate) fn cursor_mut(
+    pub fn cursor_mut(
         &'a self,
         va: &Range<Vaddr>,
     ) -> Result<CursorMut<'a, M, E, C>, PageTableError> {
@@ -253,19 +253,17 @@
     /// Create a new cursor exclusively accessing the virtual address range for querying.
     ///
-    /// If another cursor is already accessing the range, the new cursor will wait until the
-    /// previous cursor is dropped.
-    pub(crate) fn cursor(
-        &'a self,
-        va: &Range<Vaddr>,
-    ) -> Result<Cursor<'a, M, E, C>, PageTableError> {
+    /// If another cursor is already accessing the range, the new cursor may wait until the
+    /// previous cursor is dropped. The modification to the mapping by the cursor may also
+    /// block or be overridden by the mapping of another cursor.
+    pub fn cursor(&'a self, va: &Range<Vaddr>) -> Result<Cursor<'a, M, E, C>, PageTableError> {
         Cursor::new(self, va)
     }
 
     /// Create a new reference to the same page table.
     /// The caller must ensure that the kernel page table is not copied.
     /// This is only useful for IOMMU page tables. Think twice before using it in other cases.
-    pub(crate) unsafe fn shallow_copy(&self) -> Self {
+    pub unsafe fn shallow_copy(&self) -> Self {
         PageTable {
             root: self.root.clone_shallow(),
             _phantom: PhantomData,
         }
@@ -340,9 +338,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
 /// The interface for defining architecture-specific page table entries.
 ///
 /// Note that a default PTE should be a PTE that points to nothing.
-pub(crate) trait PageTableEntryTrait:
-    Clone + Copy + Debug + Default + Pod + Sized + Sync
-{
+pub trait PageTableEntryTrait: Clone + Copy + Debug + Default + Pod + Sized + Sync {
     /// Create a set of new invalid page table flags that indicates an absent page.
     ///
     /// Note that currently the implementation requires an all zero PTE to be an absent PTE.
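
With `PageTable`, `Cursor`, and `PageTableQueryResult` now exported as `pub`, callers outside the crate can inspect mappings directly. Below is a minimal sketch of a single-page query through the read-only cursor. It is illustrative only: it assumes `ostd::mm::page_table` is itself reachable as a public path and that `UserMode` is exported next to `PageTable` (neither is shown in this diff), and the `Mapped`/`MappedUntracked` field lists are abbreviated to the one field used here.

```rust
// Sketch only: point query through the now-public read-only `Cursor`.
// Assumed (not shown in this diff): `ostd::mm::page_table` is a public path,
// `UserMode` is exported from it, and base pages are 4 KiB.
use ostd::mm::{
    page_table::{PageTable, PageTableQueryResult, UserMode},
    PageProperty, Vaddr,
};

const PAGE_SIZE: usize = 4096; // assumed base page size

/// Returns the property of whatever is mapped at `va`, if anything.
fn prop_at(pt: &PageTable<UserMode>, va: Vaddr) -> Option<PageProperty> {
    // A read-only cursor over exactly one base page; creation fails if the
    // range is not covered by `UserMode` or is badly aligned.
    let mut cursor = pt.cursor(&(va..va + PAGE_SIZE)).ok()?;
    match cursor.query().ok()? {
        PageTableQueryResult::NotMapped { .. } => None,
        // Both mapped variants carry a `PageProperty`; other fields elided.
        PageTableQueryResult::Mapped { prop, .. } => Some(prop),
        PageTableQueryResult::MappedUntracked { prop, .. } => Some(prop),
    }
}
```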
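On the mutable side, `map_pa` is the untracked-mapping path that pairs with `PageTable::empty()` (kernel and IOMMU tables). A sketch under the same visibility assumptions: the addresses are hypothetical placeholders, the `PageFlags`/`CachePolicy`/`PageProperty::new` spellings are assumptions rather than something this diff shows, and upholding `map_pa`'s documented safety conditions is left entirely to the caller.

```rust
// Sketch only: an untracked MMIO mapping via the now-public `CursorMut`.
// `MMIO_VA`/`MMIO_PA` are hypothetical; `KernelMode`, `PageFlags`,
// `CachePolicy`, and `PageProperty::new` are assumed to be in scope as shown.
use ostd::mm::{
    page_table::{KernelMode, PageTable},
    CachePolicy, PageFlags, PageProperty, Paddr, Vaddr,
};

const MMIO_VA: Vaddr = 0xffff_a000_0000_0000; // hypothetical kernel-half VA
const MMIO_PA: Paddr = 0xfee0_0000; // hypothetical device PA
const MMIO_LEN: usize = 0x1000;

/// # Safety
///
/// The caller upholds the conditions quoted in the `map_pa` hunk: the range
/// does not affect kernel memory safety, the physical range is valid, and
/// untracked mappings are allowed in this virtual address range.
unsafe fn map_mmio(pt: &PageTable<KernelMode>) {
    let mut cursor = pt
        .cursor_mut(&(MMIO_VA..MMIO_VA + MMIO_LEN))
        .expect("range should be covered by KernelMode");
    let prop = PageProperty::new(PageFlags::RW, CachePolicy::Uncacheable);
    // SAFETY: forwarded from this function's contract.
    unsafe { cursor.map_pa(&(MMIO_PA..MMIO_PA + MMIO_LEN), prop) };
}
```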
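Finally, the lifecycle the user-mode methods are designed around: duplicate the kernel table with its kernel mappings shared, install it, then fork children copy-on-write. Only the call sequence comes from the diff; the wrapper function and the module path of `KernelMode`/`UserMode` are assumptions.

```rust
// Sketch only: the intended call sequence for user address spaces.
use ostd::mm::page_table::{KernelMode, PageTable, UserMode};

fn new_user_address_space(kpt: &PageTable<KernelMode>) -> PageTable<UserMode> {
    // Shallow-copies the root so all kernel mappings stay shared.
    let upt = kpt.create_user_page_table();

    // Safe to call directly: per the `activate` hunk, the shared kernel
    // mappings are what make activation sound.
    upt.activate();

    // A copy-on-write child, e.g. for implementing `fork`.
    let _child: PageTable<UserMode> = upt.fork_copy_on_write();

    upt
}
```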