Replace in_untracked_range by in_tracked_range
committed by Tate, Hongliang Tian
parent 52ee28880d
commit a1f36979d7
@@ -261,7 +261,7 @@ where

     fn cur_child(&self) -> Child<E, C> {
         self.cur_node()
-            .child(self.cur_idx(), !self.in_untracked_range())
+            .child(self.cur_idx(), self.in_tracked_range())
     }

     fn read_cur_pte(&self) -> E {
@@ -275,10 +275,10 @@ where
     ///
     /// All mappings in the user mode are tracked. And all mappings in the IOMMU
     /// page table are untracked.
-    fn in_untracked_range(&self) -> bool {
-        TypeId::of::<M>() == TypeId::of::<crate::arch::iommu::DeviceMode>()
-            || TypeId::of::<M>() == TypeId::of::<KernelMode>()
-                && !crate::mm::kspace::VMALLOC_VADDR_RANGE.contains(&self.va)
+    fn in_tracked_range(&self) -> bool {
+        TypeId::of::<M>() != TypeId::of::<crate::arch::iommu::DeviceMode>()
+            && (TypeId::of::<M>() != TypeId::of::<KernelMode>()
+                || crate::mm::kspace::VMALLOC_VADDR_RANGE.contains(&self.va))
     }
 }

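Note: the new `in_tracked_range` is exactly the negation of the old `in_untracked_range`, rewritten via De Morgan's laws. A minimal standalone sketch (not part of this patch) that checks the equivalence, with plain booleans standing in for the `TypeId` comparisons and the `VMALLOC_VADDR_RANGE.contains(&self.va)` test:

// Sketch only: `is_device`, `is_kernel`, and `in_vmalloc` are stand-ins.
fn main() {
    for is_device in [false, true] {
        for is_kernel in [false, true] {
            for in_vmalloc in [false, true] {
                // Old predicate: untracked if this is the IOMMU page table,
                // or a kernel-mode mapping outside the vmalloc range.
                let in_untracked_range = is_device || (is_kernel && !in_vmalloc);
                // New predicate: tracked in every remaining case.
                let in_tracked_range = !is_device && (!is_kernel || in_vmalloc);
                assert_eq!(in_tracked_range, !in_untracked_range);
            }
        }
    }
}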
@@ -375,7 +375,7 @@ where
     pub(crate) unsafe fn map(&mut self, frame: Frame, prop: PageProperty) {
         let end = self.0.va + frame.size();
         assert!(end <= self.0.barrier_va.end);
-        debug_assert!(!self.0.in_untracked_range());
+        debug_assert!(self.0.in_tracked_range());

         // Go down if not applicable.
         while self.0.level > C::HIGHEST_TRANSLATION_LEVEL
@@ -457,7 +457,7 @@ where
         }

         // Map the current page.
-        debug_assert!(self.0.in_untracked_range());
+        debug_assert!(!self.0.in_tracked_range());
         let idx = self.0.cur_idx();
         self.cur_node_mut().set_child_untracked(idx, pa, prop);

@@ -485,7 +485,7 @@ where

         while self.0.va < end {
             let cur_pte = self.0.read_cur_pte();
-            let untracked = self.0.in_untracked_range();
+            let is_tracked = self.0.in_tracked_range();

             // Skip if it is already invalid.
             if !cur_pte.is_present() {
@@ -506,7 +506,7 @@ where
             {
                 if cur_pte.is_present() && !cur_pte.is_last(self.0.level) {
                     self.0.level_down();
-                } else if untracked {
+                } else if !is_tracked {
                     self.level_down_split();
                 } else {
                     unreachable!();
@@ -516,7 +516,7 @@ where

             // Unmap the current page.
             let idx = self.0.cur_idx();
-            self.cur_node_mut().unset_child(idx, untracked);
+            self.cur_node_mut().unset_child(idx, is_tracked);

             self.0.move_forward();
         }
@@ -564,7 +564,7 @@ where
             // of untracked huge pages.
             let vaddr_not_fit = self.0.va % page_size::<C>(self.0.level) != 0
                 || self.0.va + page_size::<C>(self.0.level) > end;
-            if self.0.in_untracked_range() && vaddr_not_fit {
+            if !self.0.in_tracked_range() && vaddr_not_fit {
                self.level_down_split();
                continue;
            } else if vaddr_not_fit {
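For reference, a tiny standalone sketch (not part of this patch) of the `vaddr_not_fit` check above, with a fixed 2 MiB huge-page size in place of `page_size::<C>(self.0.level)` and an assumed `end`:

fn main() {
    const PAGE_SIZE_L2: usize = 2 << 20; // assumed 2 MiB huge page
    let end = 0x40_0000usize; // assumed end of the operated range
    for va in [0x20_0000usize, 0x21_0000, 0x30_0000] {
        // Not aligned to the huge page, or the huge page would overshoot `end`:
        // the cursor must split the untracked huge page before continuing.
        let vaddr_not_fit = va % PAGE_SIZE_L2 != 0 || va + PAGE_SIZE_L2 > end;
        println!("va = {va:#x}: vaddr_not_fit = {vaddr_not_fit}");
    }
}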
@@ -608,9 +608,9 @@ where

         let new_frame = PageTableNode::<E, C>::alloc(self.0.level - 1);
         let idx = self.0.cur_idx();
-        let untracked = self.0.in_untracked_range();
+        let is_tracked = self.0.in_tracked_range();
         self.cur_node_mut()
-            .set_child_pt(idx, new_frame.clone_raw(), untracked);
+            .set_child_pt(idx, new_frame.clone_raw(), is_tracked);
         self.0.level -= 1;
         self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame);
     }
@@ -620,7 +620,7 @@ where
     /// This method will split the huge page and go down to the next level.
     fn level_down_split(&mut self) {
         debug_assert!(self.0.level > 1);
-        debug_assert!(self.0.in_untracked_range());
+        debug_assert!(!self.0.in_tracked_range());

         let idx = self.0.cur_idx();
         self.cur_node_mut().split_untracked_huge(idx);
@@ -266,7 +266,7 @@ where
     }

     /// Gets an extra reference of the child at the given index.
-    pub(super) fn child(&self, idx: usize, tracked: bool) -> Child<E, C> {
+    pub(super) fn child(&self, idx: usize, in_tracked_range: bool) -> Child<E, C> {
         debug_assert!(idx < nr_subpage_per_huge::<C>());

         let pte = self.read_pte(idx);
@@ -287,7 +287,7 @@ where
                 level: self.level() - 1,
                 _phantom: PhantomData,
             })
-        } else if tracked {
+        } else if in_tracked_range {
             // SAFETY: The physical address is recorded in a valid PTE
             // which would be casted from a handle. We are incrementing
             // the reference count so we restore and forget a cloned one.
@@ -320,7 +320,7 @@ where
         let mut new_frame = Self::alloc(self.level());

         for i in deep {
-            match self.child(i, /*meaningless*/ true) {
+            match self.child(i, true) {
                 Child::PageTable(pt) => {
                     let guard = pt.clone_shallow().lock();
                     let new_child = guard.make_copy(0..nr_subpage_per_huge::<C>(), 0..0);
@@ -354,10 +354,10 @@ where
     }

     /// Removes a child if the child at the given index is present.
-    pub(super) fn unset_child(&mut self, idx: usize, in_untracked_range: bool) {
+    pub(super) fn unset_child(&mut self, idx: usize, in_tracked_range: bool) {
         debug_assert!(idx < nr_subpage_per_huge::<C>());

-        self.overwrite_pte(idx, None, in_untracked_range);
+        self.overwrite_pte(idx, None, in_tracked_range);
     }

     /// Sets a child page table at a given index.
@@ -365,14 +365,14 @@ where
         &mut self,
         idx: usize,
         pt: RawPageTableNode<E, C>,
-        in_untracked_range: bool,
+        in_tracked_range: bool,
     ) {
         // They should be ensured by the cursor.
         debug_assert!(idx < nr_subpage_per_huge::<C>());
         debug_assert_eq!(pt.level, self.level() - 1);

         let pte = Some(E::new_pt(pt.paddr()));
-        self.overwrite_pte(idx, pte, in_untracked_range);
+        self.overwrite_pte(idx, pte, in_tracked_range);
         // The ownership is transferred to a raw PTE. Don't drop the handle.
         let _ = ManuallyDrop::new(pt);
     }
@@ -384,7 +384,7 @@ where
         debug_assert_eq!(frame.level(), self.level());

         let pte = Some(E::new_frame(frame.start_paddr(), self.level(), prop));
-        self.overwrite_pte(idx, pte, false);
+        self.overwrite_pte(idx, pte, true);
         // The ownership is transferred to a raw PTE. Don't drop the handle.
         let _ = ManuallyDrop::new(frame);
     }
@@ -399,7 +399,7 @@ where
         debug_assert!(idx < nr_subpage_per_huge::<C>());

         let pte = Some(E::new_frame(pa, self.level(), prop));
-        self.overwrite_pte(idx, pte, true);
+        self.overwrite_pte(idx, pte, false);
     }

     /// Reads the info from a page table entry at a given index.
@@ -426,7 +426,7 @@ where
             unsafe { new_frame.set_child_untracked(i, small_pa, prop) };
         }

-        self.set_child_pt(idx, new_frame.into_raw(), true);
+        self.set_child_pt(idx, new_frame.into_raw(), false);
     }

     /// Protects an already mapped child at a given index.
@@ -461,7 +461,7 @@ where
     ///
     /// The caller in this module will ensure that the PTE points to initialized
     /// memory if the child is a page table.
-    fn overwrite_pte(&mut self, idx: usize, pte: Option<E>, in_untracked_range: bool) {
+    fn overwrite_pte(&mut self, idx: usize, pte: Option<E>, in_tracked_range: bool) {
         let existing_pte = self.read_pte(idx);

         if existing_pte.is_present() {
@@ -484,7 +484,7 @@ where
             if !existing_pte.is_last(self.level()) {
                 // This is a page table.
                 drop(Page::<PageTablePageMeta<E, C>>::from_raw(paddr));
-            } else if !in_untracked_range {
+            } else if in_tracked_range {
                 // This is a frame.
                 drop(Page::<FrameMeta>::from_raw(paddr));
             }
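The flag matters in `overwrite_pte` because a tracked leaf PTE owns a reference-counted frame that must be released when the entry is overwritten, while an untracked leaf maps raw physical memory and owns nothing. A minimal standalone sketch of that distinction (not the kernel code; `Rc<Frame>` stands in for `Page<FrameMeta>`):

use std::rc::Rc;

struct Frame; // stand-in for a tracked, reference-counted physical frame

enum Pte {
    NotPresent,
    // `Some(_)` iff the entry maps a tracked frame; untracked leaves own nothing.
    Leaf { frame: Option<Rc<Frame>> },
}

fn overwrite_pte(slot: &mut Pte, new: Pte, in_tracked_range: bool) {
    if let Pte::Leaf { frame } = slot {
        // A tracked leaf must hold a frame reference; an untracked one must not.
        debug_assert_eq!(frame.is_some(), in_tracked_range);
    }
    // Dropping the old value releases the tracked frame's reference, if any.
    *slot = new;
}

fn main() {
    let frame = Rc::new(Frame);
    let mut slot = Pte::Leaf { frame: Some(frame.clone()) };
    assert_eq!(Rc::strong_count(&frame), 2);
    overwrite_pte(&mut slot, Pte::NotPresent, true);
    assert_eq!(Rc::strong_count(&frame), 1); // the tracked reference was dropped
}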