mirror of https://github.com/asterinas/asterinas.git
Allow the page table un-mapper to flush the TLB precisely
Commit 9a6e1b03e3 (parent e597a10088), committed by Tate, Hongliang Tian
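
The point of the change: the old `unmap` below removes mappings without reporting what was removed, so a caller can only flush the TLB wholesale. The new `take_next` yields each removed page, so the caller can flush exactly the affected addresses. A minimal sketch of such a caller (not code from this commit), assuming hypothetical helpers `va()` (the cursor's current virtual address), `flush_tlb_addr()`, and a `size()` accessor on the returned page; `CursorMut`, `take_next`, and `PageTableItem` are the items in the diff below:

// A sketch only; generics on the cursor type are elided for brevity.
fn unmap_range_with_precise_flush(cursor: &mut CursorMut<'_>, len: usize) {
    let end = cursor.va() + len;
    while cursor.va() < end {
        // SAFETY: the caller must guarantee that unmapping this range
        // does not break the kernel's memory safety.
        match unsafe { cursor.take_next(end - cursor.va()) } {
            PageTableItem::Mapped { va, page, .. } => {
                // Flush only the page that was actually removed. Holding
                // `page` until after the flush keeps the frame from being
                // reused while stale TLB entries may still reference it.
                flush_tlb_addr(va, page.size()); // hypothetical helper
                drop(page);
            }
            PageTableItem::MappedUntracked { va, len, .. } => {
                flush_tlb_addr(va, len); // hypothetical helper
            }
            // No mapped pages remain in the range; the cursor is at `end`.
            PageTableItem::NotMapped { .. } => break,
        }
    }
}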
@@ -526,24 +526,35 @@ where
         }
     }
 
-    /// Unmaps the range starting from the current address with the given length of virtual address.
+    /// Find and remove the first page in the cursor's following range.
     ///
-    /// Already-absent mappings encountered by the cursor will be skipped. It is valid to unmap a
-    /// range that is not mapped.
+    /// The range to be searched is the current virtual address with the
+    /// provided length.
+    ///
+    /// The function stops and yields the page if it has actually removed a
+    /// page, no matter if the following pages are also required to be unmapped.
+    /// The returned page is the virtual page that existed before the removal
+    /// but has just been unmapped.
+    ///
+    /// It also makes the cursor move forward to the next page after the
+    /// removed one, when an actual page is removed. If no mapped pages exist
+    /// in the following range, the cursor will stop at the end of the range
+    /// and return [`PageTableItem::NotMapped`].
     ///
     /// # Safety
     ///
-    /// The caller should ensure that the range being unmapped does not affect kernel's memory safety.
+    /// The caller should ensure that the range being unmapped does not affect
+    /// the kernel's memory safety.
     ///
     /// # Panics
     ///
-    /// This function will panic if:
-    /// - the range to be unmapped is out of the range where the cursor is required to operate;
-    /// - the range covers only a part of a page.
-    pub unsafe fn unmap(&mut self, len: usize) {
-        let end = self.0.va + len;
+    /// This function will panic if the end of the range covers a part of a
+    /// huge page and the next page is that huge page.
+    pub unsafe fn take_next(&mut self, len: usize) -> PageTableItem {
+        let start = self.0.va;
+        assert!(len % page_size::<C>(1) == 0);
+        let end = start + len;
         assert!(end <= self.0.barrier_va.end);
-        assert!(end % C::BASE_PAGE_SIZE == 0);
 
         while self.0.va < end {
             let cur_pte = self.0.read_cur_pte();
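
For orientation between the hunks: the return type used by the new signature can be reconstructed from the variants built in the second hunk below. The field types here are assumptions for illustration (`Vaddr`, `Paddr`, `PageProperty`, and the `Page` handle stand in for the definitions elsewhere in the tree):

// Reconstructed from the match arms below; types are stand-ins.
pub enum PageTableItem {
    /// A tracked page was unmapped; its handle is returned to the caller.
    Mapped {
        va: Vaddr,
        page: Page,
        prop: PageProperty,
    },
    /// An untracked physical range was unmapped.
    MappedUntracked {
        va: Vaddr,
        pa: Paddr,
        len: usize,
        prop: PageProperty,
    },
    /// No mapped page was found in the queried range.
    NotMapped { va: Vaddr, len: usize },
}

Handing back the page handle (rather than just clearing the PTE, as the old `unset_child` did) is what makes a precise flush safe: the caller can defer dropping the page until after the TLB shootdown.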
@@ -552,49 +563,70 @@ where
             // Skip if it is already absent.
             if !cur_pte.is_present() {
+                if self.0.va + page_size::<C>(self.0.level) > end {
+                    self.0.va = end;
+                    break;
+                }
                 self.0.move_forward();
                 continue;
             }
 
-            // We check among the conditions that may lead to a level down.
-            // We ensure not unmapping in reserved kernel shared tables or releasing it.
-            let is_kernel_shared_node =
-                TypeId::of::<M>() == TypeId::of::<KernelMode>() && self.0.level >= C::NR_LEVELS - 1;
-            if is_kernel_shared_node
-                || self.0.va % page_size::<C>(self.0.level) != 0
-                || self.0.va + page_size::<C>(self.0.level) > end
-            {
-                if cur_pte.is_present() && !cur_pte.is_last(self.0.level) {
-                    self.0.level_down();
-
-                    // We have got down a level. If there's no mapped PTEs in
-                    // the current node, we can go back and skip to save time.
-                    if self.0.guards[(self.0.level - 1) as usize]
-                        .as_ref()
-                        .unwrap()
-                        .nr_children()
-                        == 0
-                    {
-                        self.0.level_up();
-                        self.0.move_forward();
-                        continue;
-                    }
-                } else if !is_tracked {
-                    self.level_down_split();
-                } else {
-                    unreachable!();
+            // Level down if the current PTE points to a page table.
+            if !cur_pte.is_last(self.0.level) {
+                self.0.level_down();
+
+                // We have gone down a level. If there are no mapped PTEs in
+                // the current node, we can go back up and skip it to save time.
+                if self.0.guards[(self.0.level - 1) as usize]
+                    .as_ref()
+                    .unwrap()
+                    .nr_children()
+                    == 0
+                {
+                    self.0.level_up();
+                    self.0.move_forward();
                 }
 
                 continue;
             }
 
-            // Unmap the current page.
+            // Level down if we are removing part of a huge untracked page.
+            if self.0.va % page_size::<C>(self.0.level) != 0
+                || self.0.va + page_size::<C>(self.0.level) > end
+            {
+                if !is_tracked {
+                    self.level_down_split();
+                    continue;
+                } else {
+                    panic!("removing part of a huge page");
+                }
+            }
+
+            // Unmap the current page and return it.
             let idx = self.0.cur_idx();
-            self.cur_node_mut().unset_child(idx, is_tracked);
+            let ret = self.cur_node_mut().take_child(idx, is_tracked);
+            let ret_page_va = self.0.va;
+            let ret_page_size = page_size::<C>(self.0.level);
+
             self.0.move_forward();
+
+            return match ret {
+                Child::Page(page) => PageTableItem::Mapped {
+                    va: ret_page_va,
+                    page,
+                    prop: cur_pte.prop(),
+                },
+                Child::Untracked(pa) => PageTableItem::MappedUntracked {
+                    va: ret_page_va,
+                    pa,
+                    len: ret_page_size,
+                    prop: cur_pte.prop(),
+                },
+                Child::None | Child::PageTable(_) => unreachable!(),
+            };
         }
 
+        // If the loop exits, we did not find any mapped pages in the range.
+        PageTableItem::NotMapped { va: start, len }
     }
 
     /// Applies the given operation to all the mappings within the range.
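
Stripped of the multi-level walk, the rewritten loop's contract is: skip absent entries, descend through intermediate tables, split a huge untracked page at a range boundary, and stop after the first real removal. A runnable toy model of that contract over a flat map (illustration only; 4 KiB pages and no huge pages assumed):

use std::collections::BTreeMap;

const PAGE_SIZE: usize = 0x1000; // 4 KiB base pages, for illustration

/// Toy model of `take_next`: find and remove the first mapped page in
/// `[*va, *va + len)`, advance the cursor past it, and return it.
/// Returns `None` in place of `PageTableItem::NotMapped`.
fn take_next_model(
    pages: &mut BTreeMap<usize, String>,
    va: &mut usize,
    len: usize,
) -> Option<(usize, String)> {
    assert!(len % PAGE_SIZE == 0);
    let end = *va + len;
    if let Some((&page_va, _)) = pages.range(*va..end).next() {
        let payload = pages.remove(&page_va).expect("present by range lookup");
        *va = page_va + PAGE_SIZE; // cursor moves forward past the removal
        Some((page_va, payload))
    } else {
        *va = end; // no mapped pages: cursor stops at the end of the range
        None
    }
}

fn main() {
    let mut pages =
        BTreeMap::from([(0x3000, "a".to_string()), (0x5000, "b".to_string())]);
    let mut va = 0x0000;
    // Each call yields exactly one removed page, so a caller can flush
    // precisely after each one; the final call reports "not mapped".
    while let Some((page_va, _)) = take_next_model(&mut pages, &mut va, 0x8000 - va) {
        println!("unmapped page at {page_va:#x}; flush just this page");
    }
    assert_eq!(va, 0x8000);
}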