diff --git a/kernel/src/vm/vmar/mod.rs b/kernel/src/vm/vmar/mod.rs
index 4cdd850de..f2fb84690 100644
--- a/kernel/src/vm/vmar/mod.rs
+++ b/kernel/src/vm/vmar/mod.rs
@@ -339,10 +339,8 @@ impl Vmar_ {
     }
 
     fn clear_vm_space(&self) {
-        let start = ROOT_VMAR_LOWEST_ADDR;
-        let end = ROOT_VMAR_CAP_ADDR;
-        let mut cursor = self.vm_space.cursor_mut(&(start..end)).unwrap();
-        cursor.unmap(end - start);
+        let mut cursor = self.vm_space.cursor_mut(&(0..ROOT_VMAR_CAP_ADDR)).unwrap();
+        cursor.unmap(ROOT_VMAR_CAP_ADDR);
     }
 
     pub fn destroy(&self, range: Range<usize>) -> Result<()> {
diff --git a/ostd/src/mm/page_table/cursor.rs b/ostd/src/mm/page_table/cursor.rs
index 9a643ee1d..00f65b3e9 100644
--- a/ostd/src/mm/page_table/cursor.rs
+++ b/ostd/src/mm/page_table/cursor.rs
@@ -65,7 +65,7 @@
 //! table cursor should add additional entry point checks to prevent these defined
 //! behaviors if they are not wanted.
 
-use core::{any::TypeId, marker::PhantomData, ops::Range};
+use core::{any::TypeId, marker::PhantomData, mem::ManuallyDrop, ops::Range};
 
 use align_ext::AlignExt;
 
@@ -74,7 +74,10 @@ use super::{
     PageTableMode, PageTableNode, PagingConstsTrait, PagingLevel, UserMode,
 };
 use crate::{
-    mm::{page::DynPage, Paddr, PageProperty, Vaddr},
+    mm::{
+        page::{meta::PageTablePageMeta, DynPage, Page},
+        Paddr, PageProperty, Vaddr,
+    },
     task::{disable_preempt, DisabledPreemptGuard},
 };
 
@@ -89,6 +92,9 @@ pub enum PageTableItem {
         page: DynPage,
         prop: PageProperty,
     },
+    PageTableNode {
+        page: DynPage,
+    },
     #[allow(dead_code)]
     MappedUntracked {
         va: Vaddr,
@@ -587,8 +593,21 @@ where
                 continue;
             }
 
-            // Level down if the current PTE points to a page table.
-            if !cur_pte.is_last(self.0.level) {
+            if self.0.va % page_size::<C>(self.0.level) != 0
+                || self.0.va + page_size::<C>(self.0.level) > end
+            {
+                if !is_tracked {
+                    // Level down if we are removing part of a huge untracked page.
+                    self.level_down_split();
+                    continue;
+                }
+
+                if cur_pte.is_last(self.0.level) {
+                    panic!("removing part of a huge page");
+                }
+
+                // Level down if the current PTE points to a page table and we cannot
+                // unmap this page table node entirely.
                 self.0.level_down();
 
                 // We have got down a level. If there's no mapped PTEs in
@@ -602,22 +621,9 @@ where
                     self.0.level_up();
                     self.0.move_forward();
                 }
-
                 continue;
             }
 
-            // Level down if we are removing part of a huge untracked page.
-            if self.0.va % page_size::<C>(self.0.level) != 0
-                || self.0.va + page_size::<C>(self.0.level) > end
-            {
-                if !is_tracked {
-                    self.level_down_split();
-                    continue;
-                } else {
-                    panic!("removing part of a huge page");
-                }
-            }
-
             // Unmap the current page and return it.
             let idx = self.0.cur_idx();
             let ret = self
@@ -640,7 +646,12 @@ where
                     len: ret_page_size,
                     prop,
                 },
-                Child::None | Child::PageTable(_) => unreachable!(),
+                Child::PageTable(node) => {
+                    let node = ManuallyDrop::new(node);
+                    let page = Page::<PageTablePageMeta<E, C>>::from_raw(node.paddr());
+                    PageTableItem::PageTableNode { page: page.into() }
+                }
+                Child::None => unreachable!(),
             };
         }
 
diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs
index 3c55d6c3b..ccd211eb3 100644
--- a/ostd/src/mm/vm_space.rs
+++ b/ostd/src/mm/vm_space.rs
@@ -333,6 +333,15 @@ impl CursorMut<'_, '_> {
                     self.flusher
                         .issue_tlb_flush_with(TlbFlushOp::Address(va), page);
                 }
+                PageTableItem::PageTableNode { page } => {
+                    if !self.flusher.need_remote_flush() && tlb_prefer_flush_all {
+                        // Only in single-CPU cases can we drop the page immediately before flushing.
+                        drop(page);
+                        continue;
+                    }
+                    // If we unmap an entire page table node, we prefer to flush all TLBs directly.
+                    self.flusher.issue_tlb_flush_with(TlbFlushOp::All, page);
+                }
                 PageTableItem::NotMapped { .. } => {
                     break;
                 }
@@ -461,6 +470,9 @@ impl TryFrom<PageTableItem> for VmItem {
             PageTableItem::MappedUntracked { .. } => {
                 Err("found untracked memory mapped into `VmSpace`")
             }
+            PageTableItem::PageTableNode { .. } => {
+                unreachable!()
+            }
         }
     }
 }
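
The `Child::PageTable(node)` arm above hands the node's reference over to a new `Page` handle: `ManuallyDrop` suppresses the `PageTableNode` destructor so its reference is never released, and `Page::<PageTablePageMeta<E, C>>::from_raw(node.paddr())` mints an owner that takes over that same reference, letting the unmapped page table node be returned (and TLB-flushed) as an ordinary page. Below is a minimal, runnable sketch of this hand-off pattern under stated assumptions: `Node`, `Handle`, and the global `REFCOUNT` are hypothetical stand-ins for `PageTableNode`, `Page<PageTablePageMeta<E, C>>`, and the page's real reference count, not part of ostd.

use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicUsize, Ordering};

// One shared reference count, standing in for the page's metadata refcount.
static REFCOUNT: AtomicUsize = AtomicUsize::new(1);

struct Node(usize); // an owner that knows the object's "physical address"

impl Node {
    fn paddr(&self) -> usize {
        self.0
    }
}

impl Drop for Node {
    fn drop(&mut self) {
        // Dropping an owner releases its reference.
        REFCOUNT.fetch_sub(1, Ordering::Relaxed);
    }
}

struct Handle(usize); // a different owner type for the same object

impl Handle {
    // Takes over a reference that the caller forfeits; the count is
    // deliberately not incremented here.
    unsafe fn from_raw(paddr: usize) -> Self {
        Handle(paddr)
    }
}

impl Drop for Handle {
    fn drop(&mut self) {
        REFCOUNT.fetch_sub(1, Ordering::Relaxed);
    }
}

fn transfer(node: Node) -> Handle {
    // Suppress `Node`'s destructor so its reference is not released...
    let node = ManuallyDrop::new(node);
    // ...and mint a `Handle` that assumes ownership of that same reference.
    unsafe { Handle::from_raw(node.paddr()) }
}

fn main() {
    let handle = transfer(Node(0x8000_0000));
    // The reference count never dropped to zero during the transfer.
    assert_eq!(REFCOUNT.load(Ordering::Relaxed), 1);
    drop(handle);
    assert_eq!(REFCOUNT.load(Ordering::Relaxed), 0);
}

Without the `ManuallyDrop`, both `Node`'s destructor and the new `Handle` would release the same reference, underflowing the count; that double release is exactly what the patched `Child::PageTable(node)` arm avoids.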