mirror of https://github.com/asterinas/asterinas.git

Allow the page table un-mapper to flush the TLB precisely

parent e597a10088
commit 9a6e1b03e3
@@ -103,18 +103,11 @@ pub(super) struct Vmar_ {
 
 impl Drop for Vmar_ {
     fn drop(&mut self) {
-        if self.is_root_vmar() {
-            self.vm_space.clear();
-        }
-        // Drop the child VMAR.
-        // FIXME: This branch can be removed once removing child VMAR usage from the code base.
-        else {
-            let mut cursor = self
-                .vm_space
-                .cursor_mut(&(self.base..self.base + self.size))
-                .unwrap();
-            cursor.unmap(self.size);
-        }
+        let mut cursor = self
+            .vm_space
+            .cursor_mut(&(self.base..self.base + self.size))
+            .unwrap();
+        cursor.unmap(self.size);
     }
 }
 
@@ -302,7 +295,12 @@ impl Vmar_ {
         if !self.is_root_vmar() {
             return_errno_with_message!(Errno::EACCES, "The vmar is not root vmar");
         }
-        self.vm_space.clear();
+        let mut cursor = self
+            .vm_space
+            .cursor_mut(&(self.base..self.base + self.size))
+            .unwrap();
+        cursor.unmap(self.size);
+        drop(cursor);
         let mut inner = self.inner.lock();
         inner.child_vmar_s.clear();
         inner.vm_mappings.clear();
@@ -13,7 +13,7 @@ use crate::{
     mm::{
         dma::Daddr,
        page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
-        page_table::PageTableError,
+        page_table::{PageTableError, PageTableItem},
         Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE,
     },
     Pod,
@@ -312,10 +312,11 @@ impl ContextTable {
         if device.device >= 32 || device.function >= 8 {
             return Err(ContextTableError::InvalidDeviceId);
         }
+        let pt = self.get_or_create_page_table(device);
+        let mut cursor = pt.cursor_mut(&(daddr..daddr + PAGE_SIZE)).unwrap();
         unsafe {
-            self.get_or_create_page_table(device)
-                .unmap(&(daddr..daddr + PAGE_SIZE))
-                .unwrap();
+            let result = cursor.take_next(PAGE_SIZE);
+            debug_assert!(matches!(result, PageTableItem::MappedUntracked { .. }));
         }
         Ok(())
     }
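The debug assertion above only checks which variant came back; the item it consumes also describes the unmapped page. A hedged sketch (not part of the commit) of a caller that inspects the result instead of only asserting on it:

    // Sketch only: destructure the unmapped item rather than asserting on it.
    // SAFETY: same justification as the surrounding `unsafe` block above.
    match unsafe { cursor.take_next(PAGE_SIZE) } {
        PageTableItem::MappedUntracked { va, pa, len, .. } => {
            debug_assert_eq!(len, PAGE_SIZE);
            let _ = (va, pa); // e.g. reconcile the released DMA window here
        }
        _ => panic!("expected an untracked IOMMU mapping"),
    }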
@@ -526,24 +526,35 @@ where
         }
     }
 
-    /// Unmaps the range starting from the current address with the given length of virtual address.
+    /// Find and remove the first page in the cursor's following range.
     ///
-    /// Already-absent mappings encountered by the cursor will be skipped. It is valid to unmap a
-    /// range that is not mapped.
+    /// The range to be found in is the current virtual address with the
+    /// provided length.
+    ///
+    /// The function stops and yields the page if it has actually removed a
+    /// page, no matter if the following pages are also required to be unmapped.
+    /// The returned page is the virtual page that existed before the removal
+    /// but having just been unmapped.
+    ///
+    /// It also makes the cursor moves forward to the next page after the
+    /// removed one, when an actual page is removed. If no mapped pages exist
+    /// in the following range, the cursor will stop at the end of the range
+    /// and return [`PageTableItem::NotMapped`].
     ///
     /// # Safety
     ///
-    /// The caller should ensure that the range being unmapped does not affect kernel's memory safety.
+    /// The caller should ensure that the range being unmapped does not affect
+    /// kernel's memory safety.
     ///
     /// # Panics
     ///
-    /// This function will panic if:
-    /// - the range to be unmapped is out of the range where the cursor is required to operate;
-    /// - the range covers only a part of a page.
-    pub unsafe fn unmap(&mut self, len: usize) {
-        let end = self.0.va + len;
+    /// This function will panic if the end range covers a part of a huge page
+    /// and the next page is that huge page.
+    pub unsafe fn take_next(&mut self, len: usize) -> PageTableItem {
+        let start = self.0.va;
+        assert!(len % page_size::<C>(1) == 0);
+        let end = start + len;
         assert!(end <= self.0.barrier_va.end);
-        assert!(end % C::BASE_PAGE_SIZE == 0);
 
         while self.0.va < end {
             let cur_pte = self.0.read_cur_pte();
@@ -552,49 +563,70 @@ where
             // Skip if it is already absent.
             if !cur_pte.is_present() {
                 if self.0.va + page_size::<C>(self.0.level) > end {
                     self.0.va = end;
                     break;
                 }
                 self.0.move_forward();
                 continue;
             }
 
-            // We check among the conditions that may lead to a level down.
-            // We ensure not unmapping in reserved kernel shared tables or releasing it.
-            let is_kernel_shared_node =
-                TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.0.level >= C::NR_LEVELS - 1;
-            if is_kernel_shared_node
-                || self.0.va % page_size::<C>(self.0.level) != 0
-                || self.0.va + page_size::<C>(self.0.level) > end
-            {
-                if cur_pte.is_present() && !cur_pte.is_last(self.0.level) {
-                    self.0.level_down();
+            // Level down if the current PTE points to a page table.
+            if !cur_pte.is_last(self.0.level) {
+                self.0.level_down();
 
-                    // We have got down a level. If there's no mapped PTEs in
-                    // the current node, we can go back and skip to save time.
-                    if self.0.guards[(self.0.level - 1) as usize]
-                        .as_ref()
-                        .unwrap()
-                        .nr_children()
-                        == 0
-                    {
-                        self.0.level_up();
-                        self.0.move_forward();
-                        continue;
-                    }
-                } else if !is_tracked {
-                    self.level_down_split();
-                } else {
-                    unreachable!();
+                // We have got down a level. If there's no mapped PTEs in
+                // the current node, we can go back and skip to save time.
+                if self.0.guards[(self.0.level - 1) as usize]
+                    .as_ref()
+                    .unwrap()
+                    .nr_children()
+                    == 0
+                {
+                    self.0.level_up();
+                    self.0.move_forward();
                 }
+
                 continue;
             }
 
-            // Unmap the current page.
+            // Level down if we are removing part of a huge untracked page.
+            if self.0.va % page_size::<C>(self.0.level) != 0
+                || self.0.va + page_size::<C>(self.0.level) > end
+            {
+                if !is_tracked {
+                    self.level_down_split();
+                    continue;
+                } else {
+                    panic!("removing part of a huge page");
+                }
+            }
+
+            // Unmap the current page and return it.
             let idx = self.0.cur_idx();
-            self.cur_node_mut().unset_child(idx, is_tracked);
+            let ret = self.cur_node_mut().take_child(idx, is_tracked);
+            let ret_page_va = self.0.va;
+            let ret_page_size = page_size::<C>(self.0.level);
+
             self.0.move_forward();
+
+            return match ret {
+                Child::Page(page) => PageTableItem::Mapped {
+                    va: ret_page_va,
+                    page,
+                    prop: cur_pte.prop(),
+                },
+                Child::Untracked(pa) => PageTableItem::MappedUntracked {
+                    va: ret_page_va,
+                    pa,
+                    len: ret_page_size,
+                    prop: cur_pte.prop(),
+                },
+                Child::None | Child::PageTable(_) => unreachable!(),
+            };
         }
+
+        // If the loop exits, we did not find any mapped pages in the range.
+        PageTableItem::NotMapped { va: start, len }
     }
 
     /// Applies the given operation to all the mappings within the range.
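For orientation, the intended calling pattern for `take_next` is a loop that keeps asking for the next mapped page in the remaining range until `NotMapped` comes back, flushing and releasing as it goes. A minimal sketch modeled on the `VmSpace` cursor change near the end of this diff; the `cursor`, `start`, and `len` bindings, the `virt_addr()` accessor, and the use of `tlb_flush_addr` here are assumptions for illustration, not part of the commit:

    // Sketch only: drain every mapped page in `start..start + len`,
    // flushing the TLB precisely before each frame is released.
    let end = start + len;
    loop {
        // SAFETY: assumed to cover only memory whose unmapping is safe.
        match unsafe { cursor.take_next(end - cursor.virt_addr()) } {
            PageTableItem::Mapped { va, page, .. } => {
                tlb_flush_addr(va); // invalidate just this page's translation
                drop(page);         // then let the frame go back to the allocator
            }
            PageTableItem::MappedUntracked { va, pa, len, .. } => {
                // Untracked memory has no owning handle to drop.
                let _ = (va, pa, len);
            }
            PageTableItem::NotMapped { .. } => break,
        }
    }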
@@ -213,11 +213,6 @@ where
         Ok(())
     }
 
-    pub unsafe fn unmap(&self, vaddr: &Range<Vaddr>) -> Result<(), PageTableError> {
-        self.cursor_mut(vaddr)?.unmap(vaddr.len());
-        Ok(())
-    }
-
     pub unsafe fn protect(
         &self,
         vaddr: &Range<Vaddr>,
@@ -296,7 +291,7 @@ pub(super) unsafe fn page_walk<E: PageTableEntryTrait, C: PagingConstsTrait>(
 ) -> Option<(Paddr, PageProperty)> {
     use super::paddr_to_vaddr;
 
-    let preempt_guard = crate::task::disable_preempt();
+    let _guard = crate::trap::disable_local();
 
     let mut cur_level = C::NR_LEVELS;
     let mut cur_pte = {
@@ -312,6 +312,31 @@ where
         }
     }
 
+    /// Remove the child at the given index and return it.
+    pub(super) fn take_child(&mut self, idx: usize, in_tracked_range: bool) -> Child<E, C> {
+        debug_assert!(idx < nr_subpage_per_huge::<C>());
+
+        let pte = self.read_pte(idx);
+        if !pte.is_present() {
+            Child::None
+        } else {
+            let paddr = pte.paddr();
+            let is_last = pte.is_last(self.level());
+            *self.nr_children_mut() -= 1;
+            self.write_pte(idx, E::new_absent());
+            if !is_last {
+                Child::PageTable(RawPageTableNode {
+                    raw: paddr,
+                    _phantom: PhantomData,
+                })
+            } else if in_tracked_range {
+                Child::Page(unsafe { DynPage::from_raw(paddr) })
+            } else {
+                Child::Untracked(paddr)
+            }
+        }
+    }
+
     /// Makes a copy of the page table node.
     ///
     /// This function allows you to control about the way to copy the children.
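Compared with the `unset_child` that the next hunk removes, `take_child` hands the unmapped child back to the caller, which is what lets the un-mapper delay freeing a page until its TLB entry is gone. A hedged sketch (the `node`, `idx`, and `va` names are illustrative, not from the commit):

    // Sketch only: the caller, not the node, now decides when the page dies.
    match node.take_child(idx, /* in_tracked_range */ true) {
        Child::Page(page) => {
            tlb_flush_addr(va); // invalidate the stale translation first
            drop(page);         // only then return the frame to the allocator
        }
        other => drop(other),
    }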
@@ -365,13 +390,6 @@ where
         new_pt
     }
 
-    /// Removes a child if the child at the given index is present.
-    pub(super) fn unset_child(&mut self, idx: usize, in_tracked_range: bool) {
-        debug_assert!(idx < nr_subpage_per_huge::<C>());
-
-        self.overwrite_pte(idx, None, in_tracked_range);
-    }
-
     /// Sets a child page table at a given index.
     pub(super) fn set_child_pt(
         &mut self,
@@ -462,6 +480,17 @@ where
         unsafe { self.as_ptr().add(idx).read() }
     }
 
+    /// Writes a page table entry at a given index.
+    ///
+    /// This operation will leak the old child if the PTE is present.
+    fn write_pte(&mut self, idx: usize, pte: E) {
+        // It should be ensured by the cursor.
+        debug_assert!(idx < nr_subpage_per_huge::<C>());
+
+        // SAFETY: the index is within the bound and PTE is plain-old-data.
+        unsafe { (self.as_ptr() as *mut E).add(idx).write(pte) };
+    }
+
     /// The number of valid PTEs.
     pub(super) fn nr_children(&self) -> u16 {
         // SAFETY: The lock is held so there is no mutable reference to it.
@@ -485,14 +514,7 @@ where
         let existing_pte = self.read_pte(idx);
 
         if existing_pte.is_present() {
-            // SAFETY: The index is within the bound and the address is aligned.
-            // The validity of the PTE is checked within this module.
-            // The safetiness also holds in the following branch.
-            unsafe {
-                (self.as_ptr() as *mut E)
-                    .add(idx)
-                    .write(pte.unwrap_or(E::new_absent()))
-            };
+            self.write_pte(idx, pte.unwrap_or(E::new_absent()));
 
             // Drop the child. We must set the PTE before dropping the child.
             // Just restore the handle and drop the handle.
@@ -23,9 +23,6 @@ fn test_range_check() {
     assert!(pt.cursor_mut(&good_va).is_ok());
     assert!(pt.cursor_mut(&bad_va).is_err());
     assert!(pt.cursor_mut(&bad_va2).is_err());
-    assert!(unsafe { pt.unmap(&good_va) }.is_ok());
-    assert!(unsafe { pt.unmap(&bad_va) }.is_err());
-    assert!(unsafe { pt.unmap(&bad_va2) }.is_err());
 }
 
 #[ktest]
@@ -38,7 +35,10 @@ fn test_tracked_map_unmap() {
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
-    unsafe { pt.unmap(&from).unwrap() };
+    assert!(matches!(
+        unsafe { pt.cursor_mut(&from).unwrap().take_next(from.len()) },
+        PageTableItem::Mapped { .. }
+    ));
     assert!(pt.query(from.start + 10).is_none());
 }
 
@@ -53,13 +53,18 @@ fn test_untracked_map_unmap() {
         UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.start..UNTRACKED_OFFSET + PAGE_SIZE * from_ppn.end;
     let to = PAGE_SIZE * to_ppn.start..PAGE_SIZE * to_ppn.end;
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
 
     unsafe { pt.map(&from, &to, prop).unwrap() };
     for i in 0..100 {
         let offset = i * (PAGE_SIZE + 1000);
         assert_eq!(pt.query(from.start + offset).unwrap().0, to.start + offset);
     }
-    let unmap = UNTRACKED_OFFSET + PAGE_SIZE * 123..UNTRACKED_OFFSET + PAGE_SIZE * 3434;
-    unsafe { pt.unmap(&unmap).unwrap() };
+
+    let unmap = UNTRACKED_OFFSET + PAGE_SIZE * 13456..UNTRACKED_OFFSET + PAGE_SIZE * 15678;
+    assert!(matches!(
+        unsafe { pt.cursor_mut(&unmap).unwrap().take_next(unmap.len()) },
+        PageTableItem::MappedUntracked { .. }
+    ));
     for i in 0..100 {
         let offset = i * (PAGE_SIZE + 10);
         if unmap.start <= from.start + offset && from.start + offset < unmap.end {
@@ -82,7 +87,10 @@ fn test_user_copy_on_write() {
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
-    unsafe { pt.unmap(&from).unwrap() };
+    assert!(matches!(
+        unsafe { pt.cursor_mut(&from).unwrap().take_next(from.len()) },
+        PageTableItem::Mapped { .. }
+    ));
     assert!(pt.query(from.start + 10).is_none());
     unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
@@ -90,7 +98,10 @@ fn test_user_copy_on_write() {
     let child_pt = pt.fork_copy_on_write();
     assert_eq!(pt.query(from.start + 10).unwrap().0, start_paddr + 10);
     assert_eq!(child_pt.query(from.start + 10).unwrap().0, start_paddr + 10);
-    unsafe { pt.unmap(&from).unwrap() };
+    assert!(matches!(
+        unsafe { pt.cursor_mut(&from).unwrap().take_next(from.len()) },
+        PageTableItem::Mapped { .. }
+    ));
     assert!(pt.query(from.start + 10).is_none());
     assert_eq!(child_pt.query(from.start + 10).unwrap().0, start_paddr + 10);
 
@@ -99,7 +110,10 @@ fn test_user_copy_on_write() {
     assert_eq!(child_pt.query(from.start + 10).unwrap().0, start_paddr + 10);
     drop(pt);
     assert_eq!(child_pt.query(from.start + 10).unwrap().0, start_paddr + 10);
-    unsafe { child_pt.unmap(&from).unwrap() };
+    assert!(matches!(
+        unsafe { child_pt.cursor_mut(&from).unwrap().take_next(from.len()) },
+        PageTableItem::Mapped { .. }
+    ));
     assert!(child_pt.query(from.start + 10).is_none());
     unsafe {
         sibling_pt
@@ -21,8 +21,8 @@ use super::{
 };
 use crate::{
     arch::mm::{
-        current_page_table_paddr, tlb_flush_addr_range, tlb_flush_all_excluding_global,
-        PageTableEntry, PagingConsts,
+        current_page_table_paddr, tlb_flush_addr, tlb_flush_addr_range,
+        tlb_flush_all_excluding_global, PageTableEntry, PagingConsts,
     },
     cpu::CpuExceptionInfo,
     mm::{
@@ -123,16 +123,6 @@ impl VmSpace {
         self.page_fault_handler.call_once(|| func);
     }
 
-    /// Clears all mappings
-    pub fn clear(&self) {
-        // SAFETY: unmapping user space is safe, and we don't care unmapping
-        // invalid ranges.
-        unsafe {
-            self.pt.unmap(&(0..MAX_USERSPACE_VADDR)).unwrap();
-        }
-        tlb_flush_all_excluding_global();
-    }
-
     /// Forks a new VM space with copy-on-write semantics.
     ///
     /// Both the parent and the newly forked VM space will be marked as
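With `clear` gone, a full user-space teardown is spelled as a cursor unmap over the whole range, as the `Vmar_` changes at the top of this diff do. A minimal sketch of the equivalent call (assuming a `vm_space: VmSpace` and that a cursor may cover the full user range):

    // Sketch only: roughly what `vm_space.clear()` used to do.
    let mut cursor = vm_space.cursor_mut(&(0..MAX_USERSPACE_VADDR)).unwrap();
    cursor.unmap(MAX_USERSPACE_VADDR);

Unlike the removed `clear`, which ended with a blanket `tlb_flush_all_excluding_global()`, the cursor-based `unmap` in the next hunk flushes the TLB one unmapped page at a time.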
@@ -297,15 +287,26 @@ impl CursorMut<'_> {
     /// This method will panic if `len` is not page-aligned.
     pub fn unmap(&mut self, len: usize) {
         assert!(len % super::PAGE_SIZE == 0);
-        let start_va = self.virt_addr();
-        let end_va = start_va + len;
+        let end_va = self.virt_addr() + len;
 
-        // SAFETY: It is safe to un-map memory in the userspace.
-        unsafe {
-            self.0.unmap(len);
+        loop {
+            // SAFETY: It is safe to un-map memory in the userspace.
+            let result = unsafe { self.0.take_next(end_va - self.virt_addr()) };
+            match result {
+                PageTableItem::Mapped { va, page, .. } => {
+                    // TODO: Ask other processors to flush the TLB before we
+                    // release the page back to the allocator.
+                    tlb_flush_addr(va);
+                    drop(page);
+                }
+                PageTableItem::NotMapped { .. } => {
+                    break;
+                }
+                PageTableItem::MappedUntracked { .. } => {
+                    panic!("found untracked memory mapped into `VmSpace`");
+                }
+            }
         }
-
-        tlb_flush_addr_range(&(start_va..end_va));
     }
 
     /// Change the mapping property starting from the current slot.