Insert space lines for readability

Ruihan Li
2024-06-04 21:17:36 +08:00
committed by Tate, Hongliang Tian
parent ebc9d04a1c
commit 52ee28880d
3 changed files with 80 additions and 5 deletions


@@ -120,6 +120,7 @@ where
if va.start % C::BASE_PAGE_SIZE != 0 || va.end % C::BASE_PAGE_SIZE != 0 {
return Err(PageTableError::UnalignedVaddr);
}
// Create a guard array that only holds the root node lock.
let guards = core::array::from_fn(|i| {
if i == 0 {
@@ -136,25 +137,33 @@
barrier_va: va.clone(),
phantom: PhantomData,
};
// Go down and acquire the proper locks. The cursor should hold the lock
// of a page table node that contains the virtual address range.
//
// While going down, previous guards of too-high levels will be released.
loop {
let level_too_high = {
let start_idx = pte_index::<C>(va.start, cursor.level);
let end_idx = pte_index::<C>(va.end - 1, cursor.level);
start_idx == end_idx
};
if !level_too_high {
break;
}
let cur_pte = cursor.read_cur_pte();
if !cur_pte.is_present() || cur_pte.is_last(cursor.level) {
break;
}
cursor.level_down();
// Release the guard of the previous level.
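// Since `guards[0]` holds the root, the slot of the level just left is
// `NR_LEVELS - cursor.level - 1`; `cursor.level` was already decremented
// by `level_down`.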
cursor.guards[(C::NR_LEVELS - cursor.level) as usize - 1] = None;
cursor.guard_level -= 1;
}
Ok(cursor)
}
@@ -163,9 +172,11 @@ where
if self.va >= self.barrier_va.end {
return None;
}
loop {
let level = self.level;
let va = self.va;
let pte = self.read_cur_pte();
if !pte.is_present() {
return Some(PageTableQueryResult::NotMapped {
@@ -177,6 +188,7 @@ where
self.level_down();
continue;
}
match self.cur_child() {
Child::Frame(frame) => {
return Some(PageTableQueryResult::Mapped {
@@ -228,6 +240,7 @@ where
/// Goes down a level assuming a child page table exists.
fn level_down(&mut self) {
debug_assert!(self.level > 1);
if let Child::PageTable(nxt_lvl_frame) = self.cur_child() {
self.level -= 1;
self.guards[(C::NR_LEVELS - self.level) as usize] = Some(nxt_lvl_frame.lock());
@@ -324,6 +337,7 @@ where
pub(crate) fn jump(&mut self, va: Vaddr) {
assert!(self.0.barrier_va.contains(&va));
assert!(va % C::BASE_PAGE_SIZE == 0);
loop {
let cur_node_start = self.0.va & !(page_size::<C>(self.0.level + 1) - 1);
let cur_node_end = cur_node_start + page_size::<C>(self.0.level + 1);
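// A page at `level + 1` spans exactly the range covered by a node at the
// current level, so masking with its alignment yields the bounds of the
// node the cursor currently stands in.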
@@ -332,12 +346,14 @@ where
self.0.va = va;
return;
}
// There is a corner case where the cursor is depleted: it sits at the start
// of the next node, but the next node is not locked because its parent is not locked.
if self.0.va >= self.0.barrier_va.end && self.0.level == self.0.guard_level {
self.0.va = va;
return;
}
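// Otherwise the target address lies outside the current node, so go up
// one level and retry.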
debug_assert!(self.0.level < self.0.guard_level);
self.0.level_up();
}
@@ -360,6 +376,7 @@ where
let end = self.0.va + frame.size();
assert!(end <= self.0.barrier_va.end);
debug_assert!(!self.0.in_untracked_range());
// Go down if the frame cannot be mapped at the current level, either because
// the level is above the highest translation level or because the address is
// not aligned to this level's page size.
while self.0.level > C::HIGHEST_TRANSLATION_LEVEL
|| self.0.va % page_size::<C>(self.0.level) != 0
@@ -376,9 +393,11 @@ where
continue;
}
debug_assert_eq!(self.0.level, frame.level());
// Map the current page.
let idx = self.0.cur_idx();
self.cur_node_mut().set_child_frame(idx, frame, prop);
self.0.move_forward();
}
@@ -413,6 +432,7 @@ where
let end = self.0.va + pa.len();
let mut pa = pa.start;
assert!(end <= self.0.barrier_va.end);
while self.0.va < end {
// We ensure that we neither map into the reserved kernel shared tables nor release them.
// Although it may be an invariant for all architectures and will be optimized
@@ -435,11 +455,13 @@ where
}
continue;
}
// Map the current page.
debug_assert!(self.0.in_untracked_range());
let idx = self.0.cur_idx();
self.cur_node_mut().set_child_untracked(idx, pa, prop);
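// Advance the physical address by the page size of the current level:
// untracked ranges may be mapped with entries larger than a base page.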
let level = self.0.level;
pa += page_size::<C>(level);
self.0.move_forward();
}
@@ -460,6 +482,7 @@ where
let end = self.0.va + len;
assert!(end <= self.0.barrier_va.end);
assert!(end % C::BASE_PAGE_SIZE == 0);
while self.0.va < end {
let cur_pte = self.0.read_cur_pte();
let untracked = self.0.in_untracked_range();
@@ -494,6 +517,7 @@ where
// Unmap the current page.
let idx = self.0.cur_idx();
self.cur_node_mut().unset_child(idx, untracked);
self.0.move_forward();
}
}
@@ -519,6 +543,7 @@ where
) -> Result<(), PageTableError> {
let end = self.0.va + len;
assert!(end <= self.0.barrier_va.end);
while self.0.va < end {
let cur_pte = self.0.read_cur_pte();
if !cur_pte.is_present() {
@@ -528,11 +553,13 @@ where
self.0.move_forward();
continue;
}
// Go down if the entry is not a leaf (last-level) mapping.
if !cur_pte.is_last(self.0.level) {
self.0.level_down();
continue;
}
// Go down if the page size is too big and we are protecting only part
// of an untracked huge page.
let vaddr_not_fit = self.0.va % page_size::<C>(self.0.level) != 0
@@ -543,10 +570,13 @@ where
} else if vaddr_not_fit {
return Err(PageTableError::ProtectingPartial);
}
let mut pte_prop = cur_pte.prop();
op(&mut pte_prop);
let idx = self.0.cur_idx();
self.cur_node_mut().protect(idx, pte_prop);
self.0.move_forward();
}
Ok(())
@@ -559,10 +589,13 @@ where
if self.0.guard_level != C::NR_LEVELS {
return None;
}
while self.0.level < C::NR_LEVELS {
self.0.level_up();
}
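// After climbing back to the root level, `guards[0]` holds the guard of
// the root node, which is handed over to the caller.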
self.0.guards[0].take()
// It is OK to drop the cursor here because the dropping method does not
// access the page table when the current level is the root level.
}
@@ -572,6 +605,7 @@ where
/// This method will create a new child frame and go down to it.
fn level_down_create(&mut self) {
debug_assert!(self.0.level > 1);
let new_frame = PageTableNode::<E, C>::alloc(self.0.level - 1);
let idx = self.0.cur_idx();
let untracked = self.0.in_untracked_range();
@@ -587,8 +621,10 @@ where
fn level_down_split(&mut self) {
debug_assert!(self.0.level > 1);
debug_assert!(self.0.in_untracked_range());
let idx = self.0.cur_idx();
self.cur_node_mut().split_untracked_huge(idx);
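// The huge PTE has now been replaced by a child page table mapping the
// same range with smaller pages, so the child must be a `PageTable`.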
let Child::PageTable(new_frame) = self.0.cur_child() else {
unreachable!();
};