Add more getter methods for VmSpace

commit 233e1fac98
parent acf956fa03

@@ -14,7 +14,7 @@ use crate::{
     Error,
 };
 
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct MapArea {
     pub flags: PageTableFlags,
     pub start_va: Vaddr,

@@ -28,23 +28,6 @@ pub struct MemorySet {
     areas: BTreeMap<Vaddr, MapArea>,
 }
 
-impl Clone for MapArea {
-    fn clone(&self) -> Self {
-        let mut mapper = BTreeMap::new();
-        for (&va, old) in &self.mapper {
-            let new = VmAllocOptions::new(1).uninit(true).alloc_single().unwrap();
-            new.copy_from_frame(old);
-            mapper.insert(va, new.clone());
-        }
-        Self {
-            start_va: self.start_va,
-            size: self.size,
-            flags: self.flags,
-            mapper,
-        }
-    }
-}
-
 impl MapArea {
     pub fn mapped_size(&self) -> usize {
         self.size

@@ -162,17 +145,15 @@ impl MemorySet {
         }
     }
 
-    /// determine whether a Vaddr is in a mapped area
+    /// Determine whether a Vaddr is in a mapped area
     pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
-        for (start_address, map_area) in self.areas.iter() {
-            if *start_address > vaddr {
-                break;
-            }
-            if *start_address <= vaddr && vaddr < *start_address + map_area.mapped_size() {
-                return true;
-            }
-        }
-        false
+        self.pt.is_mapped(vaddr)
+    }
+
+    /// Return the flags of the PTE for the target virtual memory address.
+    /// If the PTE does not exist, return `None`.
+    pub fn flags(&self, vaddr: Vaddr) -> Option<PageTableFlags> {
+        self.pt.flags(vaddr)
     }
 
     pub fn new() -> Self {

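Note: the removed body of MemorySet::is_mapped was the usual "find the area starting at or below the address, then check its extent" scan over a sorted map; after this change the method simply defers to the page table. A minimal self-contained sketch of the removed pattern, with a plain usize size standing in for MapArea and a hypothetical helper name, looks like this:

use std::collections::BTreeMap;

/// `areas` maps an area's start address to its mapped size (a simplified
/// stand-in for `MemorySet::areas`; this helper is hypothetical).
fn contains(areas: &BTreeMap<usize, usize>, vaddr: usize) -> bool {
    // Only the area starting at or below `vaddr` can possibly contain it.
    areas
        .range(..=vaddr)
        .next_back()
        .is_some_and(|(&start, &size)| vaddr < start + size)
}

fn main() {
    let mut areas = BTreeMap::new();
    areas.insert(0x1000_usize, 0x2000_usize); // covers [0x1000, 0x3000)
    assert!(contains(&areas, 0x2fff));
    assert!(!contains(&areas, 0x0fff));
    assert!(!contains(&areas, 0x3000));
}

The range(..=vaddr).next_back() lookup picks the single candidate area, which is what the removed loop approximated with its early break.
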
@@ -263,6 +244,11 @@ impl MemorySet {
 
     pub fn protect(&mut self, addr: Vaddr, flags: PageTableFlags) {
         let va = addr;
+        // Temporary solution, since the `MapArea` currently only represents
+        // a single `VmFrame`.
+        if let Some(areas) = self.areas.get_mut(&va) {
+            areas.flags = flags;
+        }
         self.pt.protect(va, flags).unwrap();
     }
 }

@@ -280,7 +280,7 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
         paddr: Paddr,
         flags: T::F,
     ) -> Result<(), PageTableError> {
-        let last_entry = self.page_walk(vaddr, true).unwrap();
+        let last_entry = self.do_page_walk_mut(vaddr, true).unwrap();
         trace!(
             "Page Table: Map vaddr:{:x?}, paddr:{:x?}, flags:{:x?}",
             vaddr,

@@ -295,22 +295,18 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
         Ok(())
     }
 
-    /// Find the last PTE
+    /// Find the last PTE and return its mutable reference.
     ///
     /// If create is set, it will create the next table until the last PTE.
-    /// If not, it will return None if it is not reach the last PTE.
-    ///
-    fn page_walk(&mut self, vaddr: Vaddr, create: bool) -> Option<&mut T> {
-        let mut count = self.config.address_width as usize;
-        debug_assert!(size_of::<T>() * (T::page_index(vaddr, count) + 1) <= PAGE_SIZE);
+    /// If not, it will return `None` if it cannot reach the last PTE.
+    fn do_page_walk_mut(&mut self, vaddr: Vaddr, create: bool) -> Option<&mut T> {
+        let mut level = self.config.address_width as usize;
         // Safety: The offset does not exceed the value of PAGE_SIZE.
         // It only change the memory controlled by page table.
-        let mut current: &mut T = unsafe {
-            &mut *(paddr_to_vaddr(self.root_paddr + size_of::<T>() * T::page_index(vaddr, count))
-                as *mut T)
-        };
+        let mut current: &mut T =
+            unsafe { &mut *(calculate_pte_vaddr::<T>(self.root_paddr, vaddr, level) as *mut T) };
 
-        while count > 1 {
+        while level > 1 {
             if !current.flags().is_present() {
                 if !create {
                     return None;

@@ -329,19 +325,44 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
             if current.flags().is_huge() {
                 break;
             }
-            count -= 1;
-            debug_assert!(size_of::<T>() * (T::page_index(vaddr, count) + 1) <= PAGE_SIZE);
+            level -= 1;
             // Safety: The offset does not exceed the value of PAGE_SIZE.
             // It only change the memory controlled by page table.
             current = unsafe {
-                &mut *(paddr_to_vaddr(
-                    current.paddr() + size_of::<T>() * T::page_index(vaddr, count),
-                ) as *mut T)
+                &mut *(calculate_pte_vaddr::<T>(current.paddr(), vaddr, level) as *mut T)
             };
         }
         Some(current)
     }
 
+    /// Find the last PTE and return its immutable reference.
+    ///
+    /// This function will return `None` if it cannot reach the last PTE.
+    /// Note that finding an entry does not mean the corresponding virtual memory address is mapped
+    /// since the entry may be empty.
+    fn do_page_walk(&self, vaddr: Vaddr) -> Option<&T> {
+        let mut level = self.config.address_width as usize;
+        // Safety: The offset does not exceed the value of PAGE_SIZE.
+        // It only change the memory controlled by page table.
+        let mut current: &T =
+            unsafe { &*(calculate_pte_vaddr::<T>(self.root_paddr, vaddr, level) as *const T) };
+
+        while level > 1 {
+            if !current.flags().is_present() {
+                return None;
+            }
+            if current.flags().is_huge() {
+                break;
+            }
+            level -= 1;
+            // Safety: The offset does not exceed the value of PAGE_SIZE.
+            // It only change the memory controlled by page table.
+            current =
+                unsafe { &*(calculate_pte_vaddr::<T>(current.paddr(), vaddr, level) as *const T) };
+        }
+        Some(current)
+    }
+
     /// Unmap `vaddr`.
     ///
     /// # Safety

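The new do_page_walk mirrors do_page_walk_mut but borrows the table immutably and never creates missing levels. As a rough, self-contained illustration of the same walk structure, here is a toy with Vec-backed tables and hypothetical names (not the kernel's types), assuming the common 4-level layout with 9 index bits per level above a 12-bit page offset:

#[derive(Clone, Copy)]
struct Pte {
    present: bool,
    huge: bool,
    next: usize, // index of the next-level table, or the mapped frame number
}

struct ToyPageTable {
    tables: Vec<[Pte; 512]>, // tables[0] is the root table
    levels: usize,
}

fn page_index(vaddr: usize, level: usize) -> usize {
    (vaddr >> (12 + 9 * (level - 1))) & 0x1ff
}

impl ToyPageTable {
    /// Read-only walk with the same shape as `do_page_walk`: bail out on a
    /// non-present intermediate entry, stop early on a huge mapping, otherwise
    /// descend one level at a time and return the last-level entry.
    fn walk(&self, vaddr: usize) -> Option<Pte> {
        let mut level = self.levels;
        let mut current = self.tables[0][page_index(vaddr, level)];
        while level > 1 {
            if !current.present {
                return None;
            }
            if current.huge {
                break;
            }
            level -= 1;
            current = self.tables[current.next][page_index(vaddr, level)];
        }
        Some(current)
    }
}

fn main() {
    let empty = Pte { present: false, huge: false, next: 0 };
    let mut root = [empty; 512];
    // Entry 0 of the root is a present huge mapping covering vaddr 0.
    root[page_index(0, 4)] = Pte { present: true, huge: true, next: 42 };
    let pt = ToyPageTable { tables: vec![root], levels: 4 };
    assert!(pt.walk(0).is_some_and(|pte| pte.huge));
    assert!(pt.walk(0xffff_8000_0000).is_none());
}

As in the real function, the entry returned at the last level may itself be empty, which is why callers such as is_mapped still check it.
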
@@ -349,9 +370,11 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
     /// This function allows arbitrary modifications to the page table.
     /// Incorrect modifications may cause the kernel to crash (e.g., unmap the linear mapping.).
     unsafe fn do_unmap(&mut self, vaddr: Vaddr) -> Result<(), PageTableError> {
-        let last_entry = self.page_walk(vaddr, false).unwrap();
+        let last_entry = self
+            .do_page_walk_mut(vaddr, false)
+            .ok_or(PageTableError::InvalidModification)?;
         trace!("Page Table: Unmap vaddr:{:x?}", vaddr);
-        if !last_entry.is_used() && !last_entry.flags().is_present() {
+        if !last_entry.is_used() || !last_entry.flags().is_present() {
             return Err(PageTableError::InvalidModification);
         }
         last_entry.clear();

@@ -368,7 +391,9 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
     /// Incorrect modifications may cause the kernel to crash
     /// (e.g., make the linear mapping visible to the user mode applications.).
     unsafe fn do_protect(&mut self, vaddr: Vaddr, new_flags: T::F) -> Result<T::F, PageTableError> {
-        let last_entry = self.page_walk(vaddr, false).unwrap();
+        let last_entry = self
+            .do_page_walk_mut(vaddr, false)
+            .ok_or(PageTableError::InvalidModification)?;
         let old_flags = last_entry.flags();
         trace!(
             "Page Table: Protect vaddr:{:x?}, flags:{:x?}",

@@ -402,14 +427,22 @@ impl<T: PageTableEntryTrait, M> PageTable<T, M> {
         }
     }
 
-    pub fn flags(&mut self, vaddr: Vaddr) -> Option<T::F> {
-        let last_entry = self.page_walk(vaddr, false)?;
-        Some(last_entry.flags())
+    /// Return the flags of the PTE for the target virtual memory address.
+    /// If the PTE does not exist, return `None`.
+    pub fn flags(&self, vaddr: Vaddr) -> Option<T::F> {
+        self.do_page_walk(vaddr).map(|entry| entry.flags())
     }
 
+    /// Return the root physical address of current `PageTable`.
     pub fn root_paddr(&self) -> Paddr {
         self.root_paddr
     }
+
+    /// Determine whether the target virtual memory address is mapped.
+    pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
+        self.do_page_walk(vaddr)
+            .is_some_and(|last_entry| last_entry.is_used() && last_entry.flags().is_present())
+    }
 }
 
 /// Read `NR_ENTRIES_PER_PAGE` of PageTableEntry from an address

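The getters above are thin Option combinators over do_page_walk, so they no longer need &mut self. A minimal stand-alone sketch of the same shape, with simplified types and hypothetical names:

#[derive(Clone, Copy)]
struct Entry {
    used: bool,
    present: bool,
    flags: u64,
}

fn lookup(entries: &[Option<Entry>], index: usize) -> Option<&Entry> {
    entries.get(index)?.as_ref()
}

fn flags(entries: &[Option<Entry>], index: usize) -> Option<u64> {
    lookup(entries, index).map(|entry| entry.flags)
}

fn is_mapped(entries: &[Option<Entry>], index: usize) -> bool {
    lookup(entries, index).is_some_and(|e| e.used && e.present)
}

fn main() {
    let entries = [Some(Entry { used: true, present: true, flags: 0b11 }), None];
    assert_eq!(flags(&entries, 0), Some(0b11));
    assert!(is_mapped(&entries, 0));
    assert!(!is_mapped(&entries, 1));
    assert!(flags(&entries, 7).is_none()); // no entry behaves like "no PTE"
}
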
@@ -428,14 +461,23 @@ pub unsafe fn table_of<'a, T: PageTableEntryTrait>(pa: Paddr) -> Option<&'a mut
 
 /// translate a virtual address to physical address which cannot use offset to get physical address
 pub fn vaddr_to_paddr(vaddr: Vaddr) -> Option<Paddr> {
-    let mut page_table = KERNEL_PAGE_TABLE.get().unwrap().lock();
+    let page_table = KERNEL_PAGE_TABLE.get().unwrap().lock();
     // Although we bypass the unsafe APIs provided by KernelMode, the purpose here is
     // only to obtain the corresponding physical address according to the mapping.
-    let last_entry = page_table.page_walk(vaddr, false)?;
+    let last_entry = page_table.do_page_walk(vaddr)?;
     // FIXME: Support huge page
     Some(last_entry.paddr() + (vaddr & (PAGE_SIZE - 1)))
 }
 
+fn calculate_pte_vaddr<T: PageTableEntryTrait>(
+    root_pa: Paddr,
+    target_va: Vaddr,
+    level: usize,
+) -> Vaddr {
+    debug_assert!(size_of::<T>() * (T::page_index(target_va, level) + 1) <= PAGE_SIZE);
+    paddr_to_vaddr(root_pa + size_of::<T>() * T::page_index(target_va, level))
+}
+
 pub fn init() {
     KERNEL_PAGE_TABLE.call_once(|| {
         // Safety: The `KERENL_PAGE_TABLE` is the only page table that is used to modify the initialize

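calculate_pte_vaddr folds the per-level pointer arithmetic that was previously repeated inline: it offsets into the table at root_pa by size_of::<T>() * page_index(va, level) and converts the result to a virtual address. A self-contained sketch of the index extraction it relies on, assuming the usual x86-64 layout of 9 index bits per level above a 12-bit page offset (this layout is an assumption here, not taken from the diff):

/// Toy illustration of the per-level index arithmetic behind `T::page_index`.
fn page_index(vaddr: usize, level: usize) -> usize {
    (vaddr >> (12 + 9 * (level - 1))) & 0x1ff
}

fn main() {
    let vaddr = 0x0000_7f12_3456_7000_usize;
    assert_eq!(page_index(vaddr, 1), 0x167); // lowest-level table index
    assert_eq!(page_index(vaddr, 4), 0x0fe); // root table index
    println!(
        "indices: {:?}",
        (1..=4).map(|l| page_index(vaddr, l)).collect::<Vec<_>>()
    );
}
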
@@ -31,6 +31,7 @@ impl VmSpace {
             memory_set: Arc::new(Mutex::new(MemorySet::new())),
         }
     }
 
     /// Activate the page table, load root physical address to cr3
     #[allow(clippy::missing_safety_doc)]
     pub unsafe fn activate(&self) {

@@ -80,12 +81,26 @@ impl VmSpace {
         Ok(base_addr)
     }
 
-    /// determine whether a vaddr is already mapped
+    /// Determine whether a `vaddr` is already mapped.
     pub fn is_mapped(&self, vaddr: Vaddr) -> bool {
         let memory_set = self.memory_set.lock();
         memory_set.is_mapped(vaddr)
     }
 
+    /// Determine whether the target `vaddr` is writable based on the page table.
+    pub fn is_writable(&self, vaddr: Vaddr) -> bool {
+        let memory_set = self.memory_set.lock();
+        let flags = memory_set.flags(vaddr);
+        flags.is_some_and(|flags| flags.contains(PageTableFlags::WRITABLE))
+    }
+
+    /// Determine whether the target `vaddr` is executable based on the page table.
+    pub fn is_executable(&self, vaddr: Vaddr) -> bool {
+        let memory_set = self.memory_set.lock();
+        let flags = memory_set.flags(vaddr);
+        flags.is_some_and(|flags| !flags.contains(PageTableFlags::NO_EXECUTE))
+    }
+
     /// Unmaps the physical memory pages within the VM address range.
     ///
     /// The range is allowed to contain gaps, where no physical memory pages
     
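is_writable and is_executable just test the corresponding PTE flag bits, treating an unmapped address (no PTE, so flags() is None) as neither writable nor executable. A stand-alone sketch of the same checks with raw bit constants, using the x86-64 bit positions as an assumption rather than the kernel's PageTableFlags type:

const WRITABLE: u64 = 1 << 1; // assumed x86-64 bit position
const NO_EXECUTE: u64 = 1 << 63; // assumed x86-64 bit position

fn is_writable(flags: Option<u64>) -> bool {
    flags.is_some_and(|f| f & WRITABLE != 0)
}

fn is_executable(flags: Option<u64>) -> bool {
    // Executable means the PTE exists and its no-execute bit is clear.
    flags.is_some_and(|f| f & NO_EXECUTE == 0)
}

fn main() {
    let present_rw: u64 = 0b11; // present + writable
    assert!(is_writable(Some(present_rw)));
    assert!(is_executable(Some(present_rw)));
    assert!(!is_executable(Some(present_rw | NO_EXECUTE)));
    assert!(!is_writable(None)); // no PTE at all: neither writable nor executable
}
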
@@ -128,6 +143,16 @@ impl VmSpace {
         }
         Ok(())
     }
+
+    /// Deep-copy the current `VmSpace`.
+    ///
+    /// The generated new `VmSpace` possesses a `MemorySet` independent from the
+    /// original `VmSpace`, with initial contents identical to the original.
+    pub fn deep_copy(&self) -> Self {
+        Self {
+            memory_set: Arc::new(Mutex::new(self.memory_set.lock().clone())),
+        }
+    }
 }
 
 impl Default for VmSpace {

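deep_copy clones the MemorySet behind the lock into a fresh Arc<Mutex<...>>, so the new VmSpace shares no state with the original, unlike cloning the Arc handle itself. A self-contained sketch of that difference using std types and hypothetical names:

use std::sync::{Arc, Mutex};

#[derive(Clone)]
struct Inner(Vec<u8>);

struct Space {
    inner: Arc<Mutex<Inner>>,
}

impl Space {
    /// Cloning the `Arc` shares state: both handles see each other's writes.
    fn share(&self) -> Self {
        Space { inner: Arc::clone(&self.inner) }
    }

    /// Deep copy in the style of the new `VmSpace::deep_copy`: the clone starts
    /// out identical but evolves independently afterwards.
    fn deep_copy(&self) -> Self {
        Space { inner: Arc::new(Mutex::new(self.inner.lock().unwrap().clone())) }
    }
}

fn main() {
    let a = Space { inner: Arc::new(Mutex::new(Inner(vec![1u8, 2, 3]))) };
    let b = a.deep_copy();
    let c = a.share();
    a.inner.lock().unwrap().0.push(4);
    assert_eq!(b.inner.lock().unwrap().0, vec![1u8, 2, 3]); // deep copy unaffected
    assert_eq!(c.inner.lock().unwrap().0, vec![1u8, 2, 3, 4]); // shared handle sees the write
}
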