Remove the VA to PA API and tidy up kernel space

This commit is contained in:
Zhang Junyang
2024-05-05 22:51:01 +08:00
committed by Tate, Hongliang Tian
parent cf5cfb3475
commit 679e5dac68
5 changed files with 40 additions and 63 deletions

View File

@@ -5,7 +5,7 @@ use core::{mem::size_of, ops::Range};
use pod::Pod; use pod::Pod;
use crate::{ use crate::{
vm::{paddr_to_vaddr, HasPaddr, Paddr, Vaddr, VmIo}, vm::{kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, HasPaddr, Paddr, Vaddr, VmIo},
Error, Result, Error, Result,
}; };
@@ -54,7 +54,7 @@ impl VmIo for IoMem {
impl HasPaddr for IoMem { impl HasPaddr for IoMem {
fn paddr(&self) -> Paddr { fn paddr(&self) -> Paddr {
crate::vm::vaddr_to_paddr(self.virtual_address).unwrap() self.virtual_address - LINEAR_MAPPING_BASE_VADDR
} }
} }
@@ -70,7 +70,7 @@ impl IoMem {
} }
pub fn paddr(&self) -> Paddr { pub fn paddr(&self) -> Paddr {
crate::vm::vaddr_to_paddr(self.virtual_address).unwrap() self.virtual_address - LINEAR_MAPPING_BASE_VADDR
} }
pub fn length(&self) -> usize { pub fn length(&self) -> usize {

View File

@@ -19,9 +19,9 @@ use crate::{
cpu::{CpuException, PageFaultErrorCode, PAGE_FAULT}, cpu::{CpuException, PageFaultErrorCode, PAGE_FAULT},
cpu_local, cpu_local,
vm::{ vm::{
kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR}, kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
page_prop::{CachePolicy, PageProperty}, page_prop::{CachePolicy, PageProperty},
PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE, PHYS_MEM_VADDR_RANGE, PageFlags, PrivilegedPageFlags as PrivFlags, PAGE_SIZE,
}, },
}; };
@@ -192,8 +192,8 @@ fn handle_kernel_page_fault(f: &TrapFrame) {
); );
assert!( assert!(
PHYS_MEM_VADDR_RANGE.contains(&(page_fault_vaddr as usize)), LINEAR_MAPPING_VADDR_RANGE.contains(&(page_fault_vaddr as usize)),
"kernel page fault: the address is outside the range of the direct mapping", "kernel page fault: the address is outside the range of the linear mapping",
); );
const SUPPORTED_ERROR_CODES: PageFaultErrorCode = PageFaultErrorCode::PRESENT const SUPPORTED_ERROR_CODES: PageFaultErrorCode = PageFaultErrorCode::PRESENT

View File

@@ -2,11 +2,13 @@
//! Kernel memory space management. //! Kernel memory space management.
use core::ops::Range;
use align_ext::AlignExt; use align_ext::AlignExt;
use spin::Once; use spin::Once;
use super::{ use super::{
page_table::{nr_ptes_per_node, page_walk, KernelMode, PageTable}, page_table::{nr_ptes_per_node, KernelMode, PageTable},
CachePolicy, MemoryRegionType, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr, CachePolicy, MemoryRegionType, Paddr, PageFlags, PageProperty, PrivilegedPageFlags, Vaddr,
PAGE_SIZE, PAGE_SIZE,
}; };
@@ -16,25 +18,38 @@ use crate::arch::mm::{PageTableEntry, PagingConsts};
/// memory in the kernel address space. /// memory in the kernel address space.
pub(crate) const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000; pub(crate) const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000;
/// The maximum size of the linear mapping of physical memory.
///
/// This size acts as a cap. If the actual memory size exceeds this value,
/// the remaining memory cannot be included in the linear mapping because
/// the maximum size of the linear mapping is limited by this value. On
/// the other hand, if the actual memory size is smaller, the linear
/// mapping can shrink to save memory consumption due to the page table.
///
/// We do not currently have APIs to manually map MMIO pages, so we have
/// to rely on the linear mapping to perform MMIO operations. Therefore,
/// we set the maximum size to 127 TiB, which makes some surprisingly
/// high MMIO addresses usable (e.g., `0x7000_0000_7004` for VirtIO
/// devices in the TDX environment) and leaves the last 1 TiB for other
/// uses (e.g., the kernel code starting at [`kernel_loaded_offset()`]).
pub(crate) const LINEAR_MAPPING_MAX_SIZE: usize = 127 << 40;
/// The address range of the linear mapping of physical memory.
///
/// This range is constructed based on [`LINEAR_MAPPING_BASE_VADDR`] and
/// [`LINEAR_MAPPING_MAX_SIZE`].
pub(crate) const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> =
LINEAR_MAPPING_BASE_VADDR..(LINEAR_MAPPING_BASE_VADDR + LINEAR_MAPPING_MAX_SIZE);
/// The kernel code is linear mapped to this address. /// The kernel code is linear mapped to this address.
/// ///
/// FIXME: This offset should be randomly chosen by the loader or the /// FIXME: This offset should be randomly chosen by the loader or the
/// boot compatibility layer. But we disabled it because the framework /// boot compatibility layer. But we disabled it because the framework
/// doesn't support relocatable kernel yet. /// doesn't support relocatable kernel yet.
pub fn kernel_loaded_offset() -> usize { pub const fn kernel_loaded_offset() -> usize {
0xffff_ffff_8000_0000 0xffff_ffff_8000_0000
} }
const_assert!(LINEAR_MAPPING_VADDR_RANGE.end < kernel_loaded_offset());
pub fn vaddr_to_paddr(va: Vaddr) -> Option<Paddr> {
if (LINEAR_MAPPING_BASE_VADDR..=kernel_loaded_offset()).contains(&va) {
// can use offset to get the physical address
Some(va - LINEAR_MAPPING_BASE_VADDR)
} else {
let root_paddr = crate::arch::mm::current_page_table_paddr();
// Safety: the root page table is valid since we read it from the register.
unsafe { page_walk::<PageTableEntry, PagingConsts>(root_paddr, va).map(|(pa, _)| pa) }
}
}
/// Convert physical address to virtual address using offset, only available inside aster-frame /// Convert physical address to virtual address using offset, only available inside aster-frame
pub(crate) fn paddr_to_vaddr(pa: Paddr) -> usize { pub(crate) fn paddr_to_vaddr(pa: Paddr) -> usize {

View File

@@ -29,7 +29,6 @@ pub use self::{
dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr}, dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
frame::{VmFrame, VmFrameVec, VmFrameVecIter, VmReader, VmSegment, VmWriter}, frame::{VmFrame, VmFrameVec, VmFrameVecIter, VmReader, VmSegment, VmWriter},
io::VmIo, io::VmIo,
kspace::vaddr_to_paddr,
options::VmAllocOptions, options::VmAllocOptions,
page_prop::{CachePolicy, PageFlags, PageProperty}, page_prop::{CachePolicy, PageFlags, PageProperty},
space::{VmMapOptions, VmSpace}, space::{VmMapOptions, VmSpace},
@@ -76,47 +75,10 @@ pub(crate) trait PagingConstsTrait: Debug + 'static {
/// for the rationale. /// for the rationale.
pub const MAX_USERSPACE_VADDR: Vaddr = 0x0000_8000_0000_0000 - PAGE_SIZE; pub const MAX_USERSPACE_VADDR: Vaddr = 0x0000_8000_0000_0000 - PAGE_SIZE;
/// The base address of the direct mapping of physical memory. /// The kernel address space.
pub(crate) const PHYS_MEM_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000; /// These are the high canonical addresses defined in most 48-bit-wide
/// architectures.
/// The maximum size of the direct mapping of physical memory. pub(crate) const KERNEL_VADDR_RANGE: Range<Vaddr> = 0xffff_8000_0000_0000..0xffff_ffff_ffff_0000;
///
/// This size acts as a cap. If the actual memory size exceeds this value,
/// the remaining memory cannot be included in the direct mapping because
/// the maximum size of the direct mapping is limited by this value. On
/// the other hand, if the actual memory size is smaller, the direct
/// mapping can shrink to save memory consumption due to the page table.
///
/// We do not currently have APIs to manually map MMIO pages, so we have
/// to rely on the direct mapping to perform MMIO operations. Therefore,
/// we set the maximum size to 127 TiB, which makes some surprisingly
/// high MMIO addresses usable (e.g., `0x7000_0000_7004` for VirtIO
/// devices in the TDX environment) and leaves the last 1 TiB for other
/// uses (e.g., the kernel code starting at [`kernel_loaded_offset()`]).
pub(crate) const PHYS_MEM_MAPPING_MAX_SIZE: usize = 127 << 40;
/// The address range of the direct mapping of physical memory.
///
/// This range is constructed based on [`PHYS_MEM_BASE_VADDR`] and
/// [`PHYS_MEM_MAPPING_MAX_SIZE`].
pub(crate) const PHYS_MEM_VADDR_RANGE: Range<Vaddr> =
PHYS_MEM_BASE_VADDR..(PHYS_MEM_BASE_VADDR + PHYS_MEM_MAPPING_MAX_SIZE);
/// The kernel code is linear mapped to this address.
///
/// FIXME: This offset should be randomly chosen by the loader or the
/// boot compatibility layer. But we disabled it because the framework
/// doesn't support relocatable kernel yet.
pub const fn kernel_loaded_offset() -> usize {
0xffff_ffff_8000_0000
}
const_assert!(PHYS_MEM_VADDR_RANGE.end < kernel_loaded_offset());
/// Start of the kernel address space.
/// This is the _lowest_ address of the x86-64's _high_ canonical addresses.
pub(crate) const KERNEL_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000;
/// End of the kernel address space (non inclusive).
pub(crate) const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000;
/// Get physical address trait /// Get physical address trait
pub trait HasPaddr { pub trait HasPaddr {

View File

@@ -58,7 +58,7 @@ impl PageTableMode for UserMode {
pub struct KernelMode {} pub struct KernelMode {}
impl PageTableMode for KernelMode { impl PageTableMode for KernelMode {
const VADDR_RANGE: Range<Vaddr> = super::KERNEL_BASE_VADDR..super::KERNEL_END_VADDR; const VADDR_RANGE: Range<Vaddr> = super::KERNEL_VADDR_RANGE;
} }
// Here are some const values that are determined by the paging constants. // Here are some const values that are determined by the paging constants.