Rename various concepts related to memory management

Zhang Junyang
2024-05-26 17:53:44 +00:00
committed by Tate, Hongliang Tian
parent 03a39c94ca
commit 14e1b1a9fc
97 changed files with 331 additions and 353 deletions
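In short: the `vm` module becomes `mm`, `VmFrame` becomes `Frame`, `VmSegment` becomes `Segment`, and `PageTableFrame`/`RawPageTableFrame` become `PageTableNode`/`RawPageTableNode`, while names like `VmFrameVec`, `VmSpace`, and `VmIo` are kept. A typical call site therefore changes roughly like this (a minimal sketch; the exact import lists differ per file, as the hunks below show):

// Before this commit:
use crate::vm::{VmFrame, VmFrameVec, VmSegment};

// After this commit:
use crate::mm::{Frame, Segment, VmFrameVec};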

View File

@ -15,7 +15,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
vm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
mm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
};
static BOOT_PARAMS: Once<BootParams> = Once::new();

View File

@ -12,7 +12,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
vm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
mm::kspace::{paddr_to_vaddr, LINEAR_MAPPING_BASE_VADDR},
};
global_asm!(include_str!("header.S"));

View File

@ -15,7 +15,7 @@ use crate::{
memory_region::{non_overlapping_regions_from, MemoryRegion, MemoryRegionType},
BootloaderAcpiArg, BootloaderFramebufferArg,
},
vm::kspace::paddr_to_vaddr,
mm::kspace::paddr_to_vaddr,
};
global_asm!(include_str!("header.S"));

View File

@ -9,11 +9,11 @@ use pod::Pod;
use super::second_stage::{DeviceMode, PageTableEntry, PagingConsts};
use crate::{
bus::pci::PciDeviceLocation,
vm::{
mm::{
dma::Daddr,
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableError,
Paddr, PageFlags, PageTable, VmAllocOptions, VmFrame, VmIo, PAGE_SIZE,
Frame, Paddr, PageFlags, PageTable, VmAllocOptions, VmIo, PAGE_SIZE,
},
};
@ -36,7 +36,7 @@ impl RootEntry {
pub struct RootTable {
/// Total 256 buses; each entry is 128 bits.
root_frame: VmFrame,
root_frame: Frame,
// TODO: Use radix tree instead.
context_tables: BTreeMap<Paddr, ContextTable>,
}
@ -233,7 +233,7 @@ pub enum AddressWidth {
pub struct ContextTable {
/// Total 32 devices, each device has 8 functions.
entries_frame: VmFrame,
entries_frame: Frame,
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
}
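A quick size check explains why a single `Frame` backs each of these tables. The root-table comment above gives 256 entries of 128 bits; for the context table, 32 devices with 8 functions each yields 256 entries, assumed here to also be 128 bits apiece (the hunk states only the counts):

const ENTRY_BYTES: usize = 128 / 8; // 16 bytes per 128-bit entry
const ROOT_ENTRIES: usize = 256; // one per bus
const CONTEXT_ENTRIES: usize = 32 * 8; // devices x functions

fn main() {
    // Both tables fill exactly one 4 KiB page frame.
    assert_eq!(ROOT_ENTRIES * ENTRY_BYTES, 4096);
    assert_eq!(CONTEXT_ENTRIES * ENTRY_BYTES, 4096);
}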

View File

@ -10,7 +10,7 @@ use trapframe::TrapFrame;
use volatile::{access::ReadWrite, Volatile};
use super::remapping::Capability;
use crate::{trap::IrqLine, vm::Vaddr};
use crate::{mm::Vaddr, trap::IrqLine};
#[derive(Debug)]
pub struct FaultEventRegisters {

View File

@ -13,8 +13,8 @@ use spin::Once;
use crate::{
arch::iommu::context_table::RootTable,
bus::pci::PciDeviceLocation,
mm::{dma::Daddr, page_table::PageTableError, Paddr, PageTable},
sync::Mutex,
vm::{dma::Daddr, page_table::PageTableError, Paddr, PageTable},
};
#[derive(Debug)]

View File

@ -17,7 +17,7 @@ use crate::{
ACPI_TABLES,
},
},
vm::paddr_to_vaddr,
mm::paddr_to_vaddr,
};
#[derive(Debug)]

View File

@ -4,7 +4,7 @@ use core::ops::Range;
use pod::Pod;
use crate::vm::{
use crate::mm::{
page_prop::{CachePolicy, PageFlags, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableEntryTrait, PageTableMode},
Paddr, PageProperty, PagingConstsTrait, PagingLevel, Vaddr,

View File

@ -9,7 +9,7 @@ use super::{
remapping::{Andd, Atsr, Drhd, Rhsa, Rmrr, Satc, Sidp},
SdtHeaderWrapper,
};
use crate::vm::paddr_to_vaddr;
use crate::mm::paddr_to_vaddr;
/// DMA Remapping structure. When the IOMMU is enabled, this structure should be present in the ACPI tables,
/// and the DRHD tables it contains can be used to obtain the register base addresses for configuring features such as the IOMMU.

View File

@ -15,8 +15,8 @@ use spin::Once;
use crate::{
boot::{self, BootloaderAcpiArg},
mm::paddr_to_vaddr,
sync::SpinLock,
vm::paddr_to_vaddr,
};
/// RSDP information: the key is the signature and the value is the virtual address of the signature

View File

@ -12,7 +12,7 @@ use spin::Once;
#[cfg(feature = "intel_tdx")]
use crate::arch::tdx_guest;
use crate::{
arch::x86::kernel::acpi::ACPI_TABLES, sync::SpinLock, trap::IrqLine, vm::paddr_to_vaddr, Error,
arch::x86::kernel::acpi::ACPI_TABLES, mm::paddr_to_vaddr, sync::SpinLock, trap::IrqLine, Error,
Result,
};

View File

@ -4,7 +4,7 @@ use spin::Once;
use x86::apic::xapic;
use super::ApicTimer;
use crate::{sync::Mutex, vm};
use crate::{mm, sync::Mutex};
const IA32_APIC_BASE_MSR: u32 = 0x1B;
const IA32_APIC_BASE_MSR_BSP: u32 = 0x100; // Processor is a BSP
@ -24,7 +24,7 @@ impl XApic {
if !Self::has_xapic() {
return None;
}
let address = vm::paddr_to_vaddr(get_apic_base_address());
let address = mm::paddr_to_vaddr(get_apic_base_address());
let region: &'static mut [u32] = unsafe { &mut *(address as *mut [u32; 256]) };
Some(Self {
mmio_region: region,

View File

@ -6,7 +6,7 @@ use core::ops::Range;
use pod::Pod;
use x86_64::{instructions::tlb, structures::paging::PhysFrame, VirtAddr};
use crate::vm::{
use crate::mm::{
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::PageTableEntryTrait,
Paddr, PagingConstsTrait, PagingLevel, Vaddr, PAGE_SIZE,

View File

@ -13,7 +13,7 @@ use trapframe::TrapFrame;
use crate::{
arch::mm::PageTableFlags,
vm::{
mm::{
kspace::KERNEL_PAGE_TABLE,
paddr_to_vaddr,
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},

View File

@ -11,8 +11,8 @@ use volatile::{
use crate::{
arch::x86::kernel::{acpi::ACPI_TABLES, apic::ioapic},
mm::paddr_to_vaddr,
trap::IrqLine,
vm::paddr_to_vaddr,
};
static HPET_INSTANCE: Once<Hpet> = Once::new();

View File

@ -6,7 +6,7 @@
use alloc::{vec, vec::Vec};
use core::mem::swap;
use crate::vm::kspace::kernel_loaded_offset;
use crate::mm::kspace::kernel_loaded_offset;
/// The type of initial memory regions that are needed for the kernel.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]

View File

@ -6,8 +6,8 @@ use log::info;
use super::VIRTIO_MMIO_MAGIC;
use crate::{
io_mem::IoMem,
mm::{paddr_to_vaddr, Paddr, VmIo},
trap::IrqLine,
vm::{paddr_to_vaddr, Paddr, VmIo},
};
/// MMIO Common device.

View File

@ -16,8 +16,8 @@ use self::bus::MmioBus;
#[cfg(feature = "intel_tdx")]
use crate::arch::tdx_guest;
use crate::{
arch::kernel::IO_APIC, bus::mmio::device::MmioCommonDevice, sync::SpinLock, trap::IrqLine,
vm::paddr_to_vaddr,
arch::kernel::IO_APIC, bus::mmio::device::MmioCommonDevice, mm::paddr_to_vaddr, sync::SpinLock,
trap::IrqLine,
};
const VIRTIO_MMIO_MAGIC: u32 = 0x74726976;

View File

@ -13,8 +13,8 @@ use crate::{
common_device::PciCommonDevice,
device_info::PciDeviceLocation,
},
mm::VmIo,
trap::IrqLine,
vm::VmIo,
};
/// MSI-X capability. It hides the BAR space that it uses.

View File

@ -5,4 +5,4 @@ extern crate xarray as xarray_crate;
pub use xarray_crate::{Cursor, CursorMut, XArray, XMark};
pub use crate::vm::page::VmFrameRef;
pub use crate::mm::page::VmFrameRef;

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use crate::vm::page_table::PageTableError;
use crate::mm::page_table::PageTableError;
/// The error type which is returned from the APIs of this crate.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]

View File

@ -5,7 +5,7 @@ use core::{mem::size_of, ops::Range};
use pod::Pod;
use crate::{
vm::{kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, HasPaddr, Paddr, Vaddr, VmIo},
mm::{kspace::LINEAR_MAPPING_BASE_VADDR, paddr_to_vaddr, HasPaddr, Paddr, Vaddr, VmIo},
Error, Result,
};

View File

@ -38,13 +38,13 @@ pub mod cpu;
mod error;
pub mod io_mem;
pub mod logger;
pub mod mm;
pub mod panicking;
pub mod prelude;
pub mod sync;
pub mod task;
pub mod trap;
pub mod user;
pub mod vm;
#[cfg(feature = "intel_tdx")]
use tdx_guest::init_tdx;
@ -64,20 +64,20 @@ pub fn init() {
td_info.attributes
);
vm::heap_allocator::init();
mm::heap_allocator::init();
boot::init();
vm::page::allocator::init();
let mut boot_pt = vm::get_boot_pt();
let meta_pages = vm::init_page_meta(&mut boot_pt);
vm::misc_init();
mm::page::allocator::init();
let mut boot_pt = mm::get_boot_pt();
let meta_pages = mm::init_page_meta(&mut boot_pt);
mm::misc_init();
trap::init();
arch::after_all_init();
bus::init();
vm::kspace::init_kernel_page_table(boot_pt, meta_pages);
mm::kspace::init_kernel_page_table(boot_pt, meta_pages);
invoke_ffi_init_funcs();
}

View File

@ -11,11 +11,11 @@ use super::{check_and_insert_dma_mapping, remove_dma_mapping, DmaError, HasDaddr
use crate::arch::tdx_guest;
use crate::{
arch::iommu,
vm::{
mm::{
dma::{dma_type, Daddr, DmaType},
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
page_prop::CachePolicy,
HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
HasPaddr, Paddr, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE,
},
};
@ -32,7 +32,7 @@ pub struct DmaCoherent {
#[derive(Debug)]
struct DmaCoherentInner {
vm_segment: VmSegment,
vm_segment: Segment,
start_daddr: Daddr,
is_cache_coherent: bool,
}
@ -47,7 +47,7 @@ impl DmaCoherent {
///
/// The method fails if any part of the given VM segment
/// already belongs to a DMA mapping.
pub fn map(vm_segment: VmSegment, is_cache_coherent: bool) -> Result<Self, DmaError> {
pub fn map(vm_segment: Segment, is_cache_coherent: bool) -> Result<Self, DmaError> {
let frame_count = vm_segment.nframes();
let start_paddr = vm_segment.start_paddr();
if !check_and_insert_dma_mapping(start_paddr, frame_count) {
@ -109,7 +109,7 @@ impl HasDaddr for DmaCoherent {
}
impl Deref for DmaCoherent {
type Target = VmSegment;
type Target = Segment;
fn deref(&self) -> &Self::Target {
&self.inner.vm_segment
}
@ -190,7 +190,7 @@ mod test {
use alloc::vec;
use super::*;
use crate::vm::VmAllocOptions;
use crate::mm::VmAllocOptions;
#[ktest]
fn map_with_coherent_device() {
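Putting the renamed API together, a minimal in-crate usage sketch might read as below. It assumes `write_bytes` mirrors the `read_bytes` side of `VmIo`, and relies on `DmaCoherent` dereferencing to `Segment` as shown above; error handling is elided with `unwrap`:

use crate::mm::{DmaCoherent, VmAllocOptions, VmIo};

fn dma_coherent_sketch() {
    // Allocate one physically contiguous frame as a `Segment`.
    let segment = VmAllocOptions::new(1).alloc_contiguous().unwrap();
    // Establish a coherent mapping; `true` asserts the device is cache-coherent.
    let dma = DmaCoherent::map(segment, true).unwrap();
    // `DmaCoherent` derefs to `Segment`, so `VmIo` methods apply directly.
    dma.write_bytes(0, &[0u8; 8]).unwrap();
}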

View File

@ -12,9 +12,9 @@ use crate::arch::tdx_guest;
use crate::{
arch::iommu,
error::Error,
vm::{
mm::{
dma::{dma_type, Daddr, DmaType},
HasPaddr, Paddr, VmIo, VmReader, VmSegment, VmWriter, PAGE_SIZE,
HasPaddr, Paddr, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE,
},
};
@ -30,7 +30,7 @@ pub struct DmaStream {
#[derive(Debug)]
struct DmaStreamInner {
vm_segment: VmSegment,
vm_segment: Segment,
start_daddr: Daddr,
is_cache_coherent: bool,
direction: DmaDirection,
@ -46,11 +46,11 @@ pub enum DmaDirection {
}
impl DmaStream {
/// Establish DMA stream mapping for a given `VmSegment`.
/// Establish DMA stream mapping for a given `Segment`.
///
/// The method fails if the segment already belongs to a DMA mapping.
pub fn map(
vm_segment: VmSegment,
vm_segment: Segment,
direction: DmaDirection,
is_cache_coherent: bool,
) -> Result<Self, DmaError> {
@ -104,7 +104,7 @@ impl DmaStream {
/// after the DMA mapping is established because
/// there is a chance that the device is updating
/// the memory. Do this at your own risk.
pub fn vm_segment(&self) -> &VmSegment {
pub fn vm_segment(&self) -> &Segment {
&self.inner.vm_segment
}
@ -294,7 +294,7 @@ mod test {
use alloc::vec;
use super::*;
use crate::vm::VmAllocOptions;
use crate::mm::VmAllocOptions;
#[ktest]
fn streaming_map() {
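For comparison with `DmaCoherent`, a streaming-mapping sketch under the same assumptions; `DmaDirection::ToDevice` is an assumed variant name, since the enum body is elided in the hunk above:

use crate::mm::{DmaDirection, DmaStream, VmAllocOptions};

fn dma_stream_sketch() {
    let segment = VmAllocOptions::new(4).alloc_contiguous().unwrap();
    // Streaming mappings carry a direction and an explicit coherence flag.
    let stream = DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap();
    // Touching the backing segment while the device may write is racy,
    // per the warning on `vm_segment()` above.
    let _seg = stream.vm_segment();
}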

View File

@ -11,7 +11,7 @@ use inherit_methods_macro::inherit_methods;
use spin::Once;
use super::Paddr;
use crate::{arch::iommu::has_iommu, sync::SpinLock, vm::PAGE_SIZE};
use crate::{arch::iommu::has_iommu, mm::PAGE_SIZE, sync::SpinLock};
/// If a device performs DMA to read or write system
/// memory, the addresses used by the device are device addresses.

View File

@ -11,10 +11,10 @@ use log::debug;
use super::paddr_to_vaddr;
use crate::{
mm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
prelude::*,
sync::SpinLock,
trap::disable_local,
vm::{page::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
Error,
};

View File

@ -9,7 +9,7 @@ use pod::Pod;
use crate::prelude::*;
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., `VmSpace`, `VmFrameVec`, and `VmFrame`.
/// e.g., `VmSpace`, `VmFrameVec`, and `Frame`.
///
/// # Concurrency
///

View File

@ -76,7 +76,7 @@ const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_e100_0000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_e000_0000_0000 << ADDR_WIDTH_SHIFT;
pub(in crate::vm) const FRAME_METADATA_RANGE: Range<Vaddr> =
pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;

View File

@ -28,7 +28,7 @@ pub use self::{
dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
io::{VmIo, VmReader, VmWriter},
options::VmAllocOptions,
page::{FrameVecIter, VmFrame, VmFrameVec, VmSegment},
page::{Frame, FrameVecIter, Segment, VmFrameVec},
page_prop::{CachePolicy, PageFlags, PageProperty},
space::{VmMapOptions, VmSpace},
};

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0
use super::{page::allocator, VmFrame, VmFrameVec, VmSegment};
use super::{page::allocator, Frame, Segment, VmFrameVec};
use crate::{prelude::*, Error};
/// Options for allocating physical memory pages (or frames).
@ -66,7 +66,7 @@ impl VmAllocOptions {
}
/// Allocate a single page frame according to the given options.
pub fn alloc_single(&self) -> Result<VmFrame> {
pub fn alloc_single(&self) -> Result<Frame> {
if self.nframes != 1 {
return Err(Error::InvalidArgs);
}
@ -81,8 +81,8 @@ impl VmAllocOptions {
/// Allocate a contiguous range of page frames according to the given options.
///
/// The returned `VmSegment` contains at least one page frame.
pub fn alloc_contiguous(&self) -> Result<VmSegment> {
/// The returned `Segment` contains at least one page frame.
pub fn alloc_contiguous(&self) -> Result<Segment> {
// It's no use checking `self.is_contiguous` here.
if self.nframes == 0 {
return Err(Error::InvalidArgs);
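To make the contract concrete: `alloc_single` insists on exactly one frame, while `alloc_contiguous` accepts any nonzero count. A minimal sketch, assuming the frame allocator has been initialized:

use crate::mm::{Frame, Segment, VmAllocOptions};

fn alloc_sketch() {
    // One frame: `alloc_single` returns a `Frame`.
    let _frame: Frame = VmAllocOptions::new(1).alloc_single().unwrap();

    // Several physically contiguous frames: `alloc_contiguous` returns a `Segment`.
    let _segment: Segment = VmAllocOptions::new(8).alloc_contiguous().unwrap();

    // `alloc_single` with `nframes != 1` fails with `Error::InvalidArgs`.
    assert!(VmAllocOptions::new(2).alloc_single().is_err());
}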

View File

@ -7,10 +7,10 @@ use buddy_system_allocator::FrameAllocator;
use log::info;
use spin::Once;
use super::{meta::FrameMeta, Page, VmFrame, VmFrameVec, VmSegment};
use crate::{boot::memory_region::MemoryRegionType, sync::SpinLock, vm::PAGE_SIZE};
use super::{meta::FrameMeta, Frame, Page, Segment, VmFrameVec};
use crate::{boot::memory_region::MemoryRegionType, mm::PAGE_SIZE, sync::SpinLock};
pub(in crate::vm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<FrameAllocator>> = Once::new();
pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
FRAME_ALLOCATOR
@ -23,7 +23,7 @@ pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
for i in 0..nframes {
let paddr = (start + i) * PAGE_SIZE;
// SAFETY: The frame index is valid.
let frame = VmFrame {
let frame = Frame {
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
};
vector.push(frame);
@ -32,16 +32,16 @@ pub(crate) fn alloc(nframes: usize) -> Option<VmFrameVec> {
})
}
pub(crate) fn alloc_single() -> Option<VmFrame> {
pub(crate) fn alloc_single() -> Option<Frame> {
FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).map(|idx| {
let paddr = idx * PAGE_SIZE;
VmFrame {
Frame {
page: Page::<FrameMeta>::from_unused(paddr).unwrap(),
}
})
}
pub(crate) fn alloc_contiguous(nframes: usize) -> Option<VmSegment> {
pub(crate) fn alloc_contiguous(nframes: usize) -> Option<Segment> {
FRAME_ALLOCATOR
.get()
.unwrap()
@ -50,7 +50,7 @@ pub(crate) fn alloc_contiguous(nframes: usize) -> Option<VmSegment> {
.map(|start|
// SAFETY: The range of page frames is contiguous and valid.
unsafe {
VmSegment::new(
Segment::new(
start * PAGE_SIZE,
nframes,
)

View File

@ -8,7 +8,7 @@ use super::{
Page,
};
use crate::{
vm::{
mm::{
io::{VmIo, VmReader, VmWriter},
paddr_to_vaddr, HasPaddr, Paddr, PagingLevel, PAGE_SIZE,
},
@ -17,30 +17,25 @@ use crate::{
/// A handle to a page frame.
///
/// The referenced page frame could either be huge or regular, which can be
/// told by the [`VmFrame::size`] method. It is ensured that there would be
/// only one TLB entry for such a frame if it is mapped to a virtual address
/// and the architecture supports huge TLB entries.
///
/// An instance of `VmFrame` is a handle to a page frame (a physical memory
/// page). A cloned `VmFrame` refers to the same page frame as the original.
/// An instance of `Frame` is a handle to a page frame (a physical memory
/// page). A cloned `Frame` refers to the same page frame as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other. Behind the scenes, a reference
/// counter is maintained for each page frame so that when all instances of
/// `VmFrame` that refer to the same page frame are dropped, the page frame
/// `Frame` that refer to the same page frame are dropped, the page frame
/// will be globally freed.
#[derive(Debug, Clone)]
pub struct VmFrame {
pub(in crate::vm) page: Page<FrameMeta>,
pub struct Frame {
pub(in crate::mm) page: Page<FrameMeta>,
}
impl HasPaddr for VmFrame {
impl HasPaddr for Frame {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmFrame {
impl Frame {
/// Returns the physical address of the page frame.
pub fn start_paddr(&self) -> Paddr {
self.page.paddr()
@ -73,7 +68,7 @@ impl VmFrame {
paddr_to_vaddr(self.start_paddr()) as *mut u8
}
pub fn copy_from(&self, src: &VmFrame) {
pub fn copy_from(&self, src: &Frame) {
if self.paddr() == src.paddr() {
return;
}
@ -84,7 +79,7 @@ impl VmFrame {
}
}
impl<'a> VmFrame {
impl<'a> Frame {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// SAFETY: the memory of the page is contiguous and is valid during `'a`.
@ -98,7 +93,7 @@ impl<'a> VmFrame {
}
}
impl VmIo for VmFrame {
impl VmIo for Frame {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
@ -134,23 +129,23 @@ impl PageMeta for FrameMeta {
use core::{marker::PhantomData, ops::Deref};
/// `VmFrameRef` is a struct that can work as `&'a VmFrame`.
/// `VmFrameRef` is a struct that can work as `&'a Frame`.
pub struct VmFrameRef<'a> {
inner: ManuallyDrop<VmFrame>,
_marker: PhantomData<&'a VmFrame>,
inner: ManuallyDrop<Frame>,
_marker: PhantomData<&'a Frame>,
}
impl<'a> Deref for VmFrameRef<'a> {
type Target = VmFrame;
type Target = Frame;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
// SAFETY: `VmFrame` is essentially an `*const FrameMeta` that could be used as a `*const` pointer.
// SAFETY: `Frame` is essentially an `*const FrameMeta` that could be used as a `*const` pointer.
// The pointer is also aligned to 4.
unsafe impl xarray::ItemEntry for VmFrame {
unsafe impl xarray::ItemEntry for Frame {
type Ref<'a> = VmFrameRef<'a> where Self: 'a;
fn into_raw(self) -> *const () {
@ -165,7 +160,7 @@ unsafe impl xarray::ItemEntry for VmFrame {
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
Self::Ref {
inner: ManuallyDrop::new(VmFrame::from_raw(raw)),
inner: ManuallyDrop::new(Frame::from_raw(raw)),
_marker: PhantomData,
}
}
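Since `Frame` is `Clone` and implements `VmIo`, a round-trip sketch could look as follows (allocation via `VmAllocOptions` as in the tests elsewhere in this commit; `write_bytes` is assumed to mirror the `read_bytes` shown above):

use crate::mm::{Frame, VmAllocOptions, VmIo};

fn frame_io_sketch() {
    let frame: Frame = VmAllocOptions::new(1).alloc_single().unwrap();

    // Write at an offset, then read it back through the same `VmIo` interface.
    frame.write_bytes(0, b"hello").unwrap();
    let mut buf = [0u8; 5];
    frame.read_bytes(0, &mut buf).unwrap();
    assert_eq!(&buf, b"hello");

    // A clone is a second handle to the same physical page, not a copy of it.
    let alias = frame.clone();
    assert_eq!(alias.start_paddr(), frame.start_paddr());
}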

View File

@ -9,7 +9,7 @@ pub mod mapping {
use core::mem::size_of;
use super::MetaSlot;
use crate::vm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
/// Convert a physical address of a base page to the virtual address of the metadata slot.
pub const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
@ -40,7 +40,7 @@ use static_assertions::const_assert_eq;
use super::Page;
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
vm::{
mm::{
paddr_to_vaddr,
page::allocator::FRAME_ALLOCATOR,
page_size,

View File

@ -11,10 +11,10 @@
//! address space of the users are backed by frames.
pub(crate) mod allocator;
pub(in crate::vm) mod meta;
pub(in crate::mm) mod meta;
use meta::{mapping, MetaSlot, PageMeta};
mod frame;
pub use frame::{VmFrame, VmFrameRef};
pub use frame::{Frame, VmFrameRef};
mod vm_frame_vec;
pub use vm_frame_vec::{FrameVecIter, VmFrameVec};
mod segment;
@ -23,10 +23,10 @@ use core::{
sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};
pub use segment::VmSegment;
pub use segment::Segment;
use super::PAGE_SIZE;
use crate::vm::{paddr_to_vaddr, Paddr, PagingConsts, Vaddr};
use crate::mm::{paddr_to_vaddr, Paddr, PagingConsts, Vaddr};
static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
@ -54,7 +54,7 @@ pub enum PageHandleError {
impl<M: PageMeta> Page<M> {
/// Convert an unused page to a `Page` handle for a specific usage.
pub(in crate::vm) fn from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
pub(in crate::mm) fn from_unused(paddr: Paddr) -> Result<Self, PageHandleError> {
if paddr % PAGE_SIZE != 0 {
return Err(PageHandleError::NotAligned);
}
@ -100,7 +100,7 @@ impl<M: PageMeta> Page<M> {
///
/// Also, the caller ensures that the usage of the page is correct. There's
/// no checking of the usage in this function.
pub(in crate::vm) unsafe fn restore(paddr: Paddr) -> Self {
pub(in crate::mm) unsafe fn restore(paddr: Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
@ -118,7 +118,7 @@ impl<M: PageMeta> Page<M> {
/// # Safety
///
/// The safety requirements are the same as [`Page::restore`].
pub(in crate::vm) unsafe fn clone_restore(paddr: &Paddr) -> Self {
pub(in crate::mm) unsafe fn clone_restore(paddr: &Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(*paddr);
let ptr = vaddr as *const MetaSlot;
@ -158,7 +158,7 @@ impl<M: PageMeta> Page<M> {
/// # Safety
///
/// The caller should be sure that the page is exclusively owned.
pub(in crate::vm) unsafe fn meta_mut(&mut self) -> &mut M {
pub(in crate::mm) unsafe fn meta_mut(&mut self) -> &mut M {
unsafe { &mut *(self.ptr as *mut M) }
}
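Because `from_unused`, `restore`, and `meta_mut` are now `pub(in crate::mm)`, only memory-management code can mint or revive `Page` handles; the frame allocator above is one such caller. A condensed sketch of that pattern, valid only inside `crate::mm`:

use crate::mm::{
    page::{meta::FrameMeta, Page},
    Paddr, PAGE_SIZE,
};

fn mint_page(paddr: Paddr) -> Page<FrameMeta> {
    // `from_unused` rejects unaligned addresses with `PageHandleError::NotAligned`,
    // so callers pass frame-aligned physical addresses.
    debug_assert_eq!(paddr % PAGE_SIZE, 0);
    Page::<FrameMeta>::from_unused(paddr).unwrap()
}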

View File

@ -5,19 +5,19 @@ use core::ops::Range;
use super::{
allocator,
meta::{PageMeta, PageUsage, SegmentHeadMeta},
Page, VmFrame,
Frame, Page,
};
use crate::{
vm::{HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE},
mm::{HasPaddr, Paddr, VmIo, VmReader, VmWriter, PAGE_SIZE},
Error, Result,
};
/// A handle to a contiguous range of page frames (physical memory pages).
///
/// The biggest difference between `VmSegment` and `VmFrameVec` is that
/// the page frames must be contiguous for `VmSegment`.
/// The biggest difference between `Segment` and `VmFrameVec` is that
/// the page frames must be contiguous for `Segment`.
///
/// A cloned `VmSegment` refers to the same page frames as the original.
/// A cloned `Segment` refers to the same page frames as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other.
///
@ -30,25 +30,25 @@ use crate::{
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug, Clone)]
pub struct VmSegment {
pub struct Segment {
head_page: Page<SegmentHeadMeta>,
range: Range<usize>,
}
impl HasPaddr for VmSegment {
impl HasPaddr for Segment {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl VmSegment {
/// Creates a new `VmSegment`.
impl Segment {
/// Creates a new `Segment`.
///
/// # Safety
///
/// The given range of page frames must be contiguous and valid for use.
/// The given range of page frames must not have been allocated before,
/// as part of either a `VmFrame` or `VmSegment`.
/// as part of either a `Frame` or `Segment`.
pub(crate) unsafe fn new(paddr: Paddr, nframes: usize) -> Self {
let mut head = Page::<SegmentHeadMeta>::from_unused(paddr).unwrap();
head.meta_mut().seg_len = (nframes * PAGE_SIZE) as u64;
@ -58,11 +58,11 @@ impl VmSegment {
}
}
/// Returns a part of the `VmSegment`.
/// Returns a part of the `Segment`.
///
/// # Panics
///
/// If `range` is not within the range of this `VmSegment`,
/// If `range` is not within the range of this `Segment`,
/// then the method panics.
pub fn range(&self, range: Range<usize>) -> Self {
let orig_range = &self.range;
@ -108,7 +108,7 @@ impl VmSegment {
}
}
impl<'a> VmSegment {
impl<'a> Segment {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a> {
// SAFETY: the memory of the page frames is contiguous and is valid during `'a`.
@ -122,7 +122,7 @@ impl<'a> VmSegment {
}
}
impl VmIo for VmSegment {
impl VmIo for Segment {
fn read_bytes(&self, offset: usize, buf: &mut [u8]) -> Result<()> {
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(buf.len()).ok_or(Error::Overflow)?;
@ -156,8 +156,8 @@ impl PageMeta for SegmentHeadMeta {
}
}
impl From<VmFrame> for VmSegment {
fn from(frame: VmFrame) -> Self {
impl From<Frame> for Segment {
fn from(frame: Frame) -> Self {
Self {
head_page: frame.page.into(),
range: 0..1,
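A sketch of the sub-slicing API above; `range` takes frame indices relative to this `Segment` and panics when out of bounds, per the `# Panics` note:

use crate::mm::{Segment, VmAllocOptions, VmIo};

fn segment_slicing_sketch() {
    let segment: Segment = VmAllocOptions::new(4).alloc_contiguous().unwrap();

    // Frames 1..3 of the segment; the sub-segment shares the same pages.
    let middle = segment.range(1..3);
    middle.write_bytes(0, &[0xAB; 16]).unwrap();

    // Out of bounds would panic:
    // let _bad = segment.range(3..9);
}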

View File

@ -3,26 +3,26 @@
use alloc::{vec, vec::Vec};
use crate::{
vm::{VmFrame, VmIo, VmReader, VmWriter, PAGE_SIZE},
mm::{Frame, VmIo, VmReader, VmWriter, PAGE_SIZE},
Error, Result,
};
/// A collection of base page frames (regular physical memory pages).
///
/// For the most parts, `VmFrameVec` is like `Vec<VmFrame>`. But the
/// For the most part, `VmFrameVec` is like `Vec<Frame>`. But the
/// implementation may or may not be based on `Vec`. Having a dedicated
/// type to represent a series of page frames is convenient because,
/// more often than not, one needs to operate on a batch of frames rather
/// than a single frame.
#[derive(Debug, Clone)]
pub struct VmFrameVec(pub(crate) Vec<VmFrame>);
pub struct VmFrameVec(pub(crate) Vec<Frame>);
impl VmFrameVec {
pub fn get(&self, index: usize) -> Option<&VmFrame> {
pub fn get(&self, index: usize) -> Option<&Frame> {
self.0.get(index)
}
/// returns an empty VmFrame vec
/// Returns an empty Frame vec
pub fn empty() -> Self {
Self(Vec::new())
}
@ -32,17 +32,17 @@ impl VmFrameVec {
}
/// Pushes a new frame to the collection.
pub fn push(&mut self, new_frame: VmFrame) {
pub fn push(&mut self, new_frame: Frame) {
self.0.push(new_frame);
}
/// Pops a frame from the collection.
pub fn pop(&mut self) -> Option<VmFrame> {
pub fn pop(&mut self) -> Option<Frame> {
self.0.pop()
}
/// Removes a frame at a position.
pub fn remove(&mut self, at: usize) -> VmFrame {
pub fn remove(&mut self, at: usize) -> Frame {
self.0.remove(at)
}
@ -63,7 +63,7 @@ impl VmFrameVec {
}
/// Returns an iterator
pub fn iter(&self) -> core::slice::Iter<'_, VmFrame> {
pub fn iter(&self) -> core::slice::Iter<'_, Frame> {
self.0.iter()
}
@ -84,13 +84,13 @@ impl VmFrameVec {
self.0.len() * PAGE_SIZE
}
pub fn from_one_frame(frame: VmFrame) -> Self {
pub fn from_one_frame(frame: Frame) -> Self {
Self(vec![frame])
}
}
impl IntoIterator for VmFrameVec {
type Item = VmFrame;
type Item = Frame;
type IntoIter = alloc::vec::IntoIter<Self::Item>;
@ -154,7 +154,7 @@ impl<'a> FrameVecIter<'a> {
}
impl<'a> Iterator for FrameVecIter<'a> {
type Item = &'a VmFrame;
type Item = &'a Frame;
fn next(&mut self) -> Option<Self::Item> {
if self.current >= self.frames.0.len() {
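A batch-oriented sketch of the API above. The byte-size method's body (`len() * PAGE_SIZE`) appears in the hunk; its name, `nbytes`, is assumed here:

use crate::mm::{VmAllocOptions, VmFrameVec, PAGE_SIZE};

fn frame_vec_sketch() {
    let mut frames = VmFrameVec::empty();
    frames.push(VmAllocOptions::new(1).alloc_single().unwrap());
    frames.push(VmAllocOptions::new(1).alloc_single().unwrap());

    assert_eq!(frames.nbytes(), 2 * PAGE_SIZE);
    for frame in frames.iter() {
        // Each entry is a `Frame` handle to one physical page.
        let _pa = frame.start_paddr();
    }
}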

View File

@ -9,7 +9,7 @@ use alloc::vec::Vec;
use super::{pte_index, PageTableEntryTrait};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
vm::{
mm::{
paddr_to_vaddr, page::allocator::FRAME_ALLOCATOR, PageProperty, PagingConstsTrait, Vaddr,
PAGE_SIZE,
},
@ -26,7 +26,7 @@ pub struct BootPageTable<
> {
root_pt: FrameNumber,
// The frames allocated for this page table are not tracked with
// metadata [`crate::vm::frame::meta`]. Here is a record of it
// metadata [`crate::mm::frame::meta`]. They are recorded here
// for deallocation.
frames: Vec<FrameNumber>,
_pretend_to_use: core::marker::PhantomData<(E, C)>,
@ -101,7 +101,7 @@ fn test_boot_pt() {
use super::page_walk;
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
vm::{CachePolicy, PageFlags, VmAllocOptions},
mm::{CachePolicy, PageFlags, VmAllocOptions},
};
let root_frame = VmAllocOptions::new(1).alloc_single().unwrap();

View File

@ -56,9 +56,9 @@ use align_ext::AlignExt;
use super::{
page_size, pte_index, Child, KernelMode, PageTable, PageTableEntryTrait, PageTableError,
PageTableFrame, PageTableMode, PagingConstsTrait, PagingLevel,
PageTableMode, PageTableNode, PagingConstsTrait, PagingLevel,
};
use crate::vm::{Paddr, PageProperty, Vaddr, VmFrame};
use crate::mm::{Frame, Paddr, PageProperty, Vaddr};
#[derive(Clone, Debug)]
pub(crate) enum PageTableQueryResult {
@ -68,7 +68,7 @@ pub(crate) enum PageTableQueryResult {
},
Mapped {
va: Vaddr,
frame: VmFrame,
frame: Frame,
prop: PageProperty,
},
MappedUntracked {
@ -94,7 +94,7 @@ where
[(); C::NR_LEVELS as usize]:,
{
pt: &'a PageTable<M, E, C>,
guards: [Option<PageTableFrame<E, C>>; C::NR_LEVELS as usize],
guards: [Option<PageTableNode<E, C>>; C::NR_LEVELS as usize],
level: PagingLevel, // current level
guard_level: PagingLevel, // from guard_level to level, the locks are held
va: Vaddr, // current virtual address
@ -246,7 +246,7 @@ where
}
}
fn cur_node(&self) -> &PageTableFrame<E, C> {
fn cur_node(&self) -> &PageTableNode<E, C> {
self.guards[(C::NR_LEVELS - self.level) as usize]
.as_ref()
.unwrap()
@ -267,7 +267,7 @@ where
/// Tell if the current virtual range must contain untracked mappings.
///
/// In the kernel mode, this is aligned with the definition in [`crate::vm::kspace`].
/// In the kernel mode, this is aligned with the definition in [`crate::mm::kspace`].
/// Only linear mappings in the kernel are considered as untracked mappings.
///
/// All mappings in the user mode are tracked. And all mappings in the IOMMU
@ -275,7 +275,7 @@ where
fn in_untracked_range(&self) -> bool {
TypeId::of::<M>() == TypeId::of::<crate::arch::iommu::DeviceMode>()
|| TypeId::of::<M>() == TypeId::of::<KernelMode>()
&& !crate::vm::kspace::VMALLOC_VADDR_RANGE.contains(&self.va)
&& !crate::mm::kspace::VMALLOC_VADDR_RANGE.contains(&self.va)
}
}
@ -385,7 +385,7 @@ where
}
}
/// Map the range starting from the current address to a `VmFrame`.
/// Map the range starting from the current address to a `Frame`.
///
/// # Panics
///
@ -398,7 +398,7 @@ where
///
/// The caller should ensure that the virtual range being mapped does
/// not affect kernel's memory safety.
pub(crate) unsafe fn map(&mut self, frame: VmFrame, prop: PageProperty) {
pub(crate) unsafe fn map(&mut self, frame: Frame, prop: PageProperty) {
let end = self.0.va + frame.size();
assert!(end <= self.0.barrier_va.end);
debug_assert!(!self.0.in_untracked_range());
@ -599,7 +599,7 @@ where
/// Consume itself and leak the root guard for the caller if it locked the root level.
///
/// It is useful when the caller wants to keep the root guard while dropping the cursor.
pub(super) fn leak_root_guard(mut self) -> Option<PageTableFrame<E, C>> {
pub(super) fn leak_root_guard(mut self) -> Option<PageTableNode<E, C>> {
if self.0.guard_level != C::NR_LEVELS {
return None;
}
@ -616,7 +616,7 @@ where
/// This method will create a new child frame and go down to it.
fn level_down_create(&mut self) {
debug_assert!(self.0.level > 1);
let new_frame = PageTableFrame::<E, C>::alloc(self.0.level - 1);
let new_frame = PageTableNode::<E, C>::alloc(self.0.level - 1);
let idx = self.0.cur_idx();
let untracked = self.0.in_untracked_range();
self.cur_node_mut()
@ -640,7 +640,7 @@ where
self.0.guards[(C::NR_LEVELS - self.0.level) as usize] = Some(new_frame.lock());
}
fn cur_node_mut(&mut self) -> &mut PageTableFrame<E, C> {
fn cur_node_mut(&mut self) -> &mut PageTableNode<E, C> {
self.0.guards[(C::NR_LEVELS - self.0.level) as usize]
.as_mut()
.unwrap()
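The recurring expression `(C::NR_LEVELS - level) as usize` maps a paging level to a slot in the `guards` array, so the root guard always sits at index 0. A worked check, assuming the x86-64 default of four levels:

const NR_LEVELS: u8 = 4; // assumed value of `PagingConsts::NR_LEVELS`

fn guard_index(level: u8) -> usize {
    (NR_LEVELS - level) as usize
}

fn main() {
    assert_eq!(guard_index(4), 0); // root node guard
    assert_eq!(guard_index(1), 3); // level-1 (leaf) guard, last slot
}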

View File

@ -28,7 +28,7 @@ use core::{marker::PhantomData, mem::ManuallyDrop, ops::Range, panic, sync::atom
use super::{nr_subpage_per_huge, page_size, PageTableEntryTrait};
use crate::{
arch::mm::{PageTableEntry, PagingConsts},
vm::{
mm::{
paddr_to_vaddr,
page::{
allocator::FRAME_ALLOCATOR,
@ -36,7 +36,7 @@ use crate::{
Page,
},
page_prop::PageProperty,
Paddr, PagingConstsTrait, PagingLevel, VmFrame, PAGE_SIZE,
Frame, Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
},
};
@ -47,9 +47,9 @@ use crate::{
/// the page table frame and subsequent children will be freed.
///
/// Only the CPU or a PTE can access a page table frame using a raw handle. To access the page
/// table frame from the kernel code, use the handle [`PageTableFrame`].
/// table frame from the kernel code, use the handle [`PageTableNode`].
#[derive(Debug)]
pub(super) struct RawPageTableFrame<E: PageTableEntryTrait, C: PagingConstsTrait>
pub(super) struct RawPageTableNode<E: PageTableEntryTrait, C: PagingConstsTrait>
where
[(); C::NR_LEVELS as usize]:,
{
@ -58,7 +58,7 @@ where
_phantom: PhantomData<(E, C)>,
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> RawPageTableFrame<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> RawPageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
@ -67,7 +67,7 @@ where
}
/// Convert a raw handle to an accessible handle by acquiring the lock.
pub(super) fn lock(self) -> PageTableFrame<E, C> {
pub(super) fn lock(self) -> PageTableNode<E, C> {
let page = unsafe { Page::<PageTablePageMeta<E, C>>::restore(self.paddr()) };
debug_assert!(page.meta().level == self.level);
// Acquire the lock.
@ -81,7 +81,7 @@ where
}
// Prevent dropping the handle.
let _ = ManuallyDrop::new(self);
PageTableFrame::<E, C> { page }
PageTableNode::<E, C> { page }
}
/// Create a copy of the handle.
@ -116,7 +116,7 @@ where
use crate::{
arch::mm::{activate_page_table, current_page_table_paddr},
vm::CachePolicy,
mm::CachePolicy,
};
debug_assert_eq!(self.level, PagingConsts::NR_LEVELS);
@ -137,7 +137,7 @@ where
// Decrement the reference count of the last activated page table.
// Boot page tables are not tracked with [`PageTableFrame`], but
// Boot page tables are not tracked with [`PageTableNode`], but
// all page tables after the boot stage are tracked.
//
// TODO: the `cpu_local` implementation currently is underpowered,
@ -158,7 +158,7 @@ where
}
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableFrame<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for RawPageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
@ -175,7 +175,7 @@ where
/// table frame has no references. You can set the page table frame as a child of another
/// page table frame.
#[derive(Debug)]
pub(super) struct PageTableFrame<
pub(super) struct PageTableNode<
E: PageTableEntryTrait = PageTableEntry,
C: PagingConstsTrait = PagingConsts,
> where
@ -190,14 +190,14 @@ pub(super) enum Child<E: PageTableEntryTrait = PageTableEntry, C: PagingConstsTr
where
[(); C::NR_LEVELS as usize]:,
{
PageTable(RawPageTableFrame<E, C>),
Frame(VmFrame),
PageTable(RawPageTableNode<E, C>),
Frame(Frame),
/// Frames not tracked by handles.
Untracked(Paddr),
None,
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableFrame<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
@ -227,12 +227,12 @@ where
}
/// Convert the handle into a raw handle to be stored in a PTE or CPU.
pub(super) fn into_raw(self) -> RawPageTableFrame<E, C> {
pub(super) fn into_raw(self) -> RawPageTableNode<E, C> {
let level = self.level();
let raw = self.page.paddr();
self.page.meta().lock.store(0, Ordering::Release);
core::mem::forget(self);
RawPageTableFrame {
RawPageTableNode {
raw,
level,
_phantom: PhantomData,
@ -240,9 +240,9 @@ where
}
/// Get a raw handle while still preserving the original handle.
pub(super) fn clone_raw(&self) -> RawPageTableFrame<E, C> {
pub(super) fn clone_raw(&self) -> RawPageTableNode<E, C> {
core::mem::forget(self.page.clone());
RawPageTableFrame {
RawPageTableNode {
raw: self.page.paddr(),
level: self.level(),
_phantom: PhantomData,
@ -261,7 +261,7 @@ where
core::mem::forget(unsafe {
Page::<PageTablePageMeta<E, C>>::clone_restore(&paddr)
});
Child::PageTable(RawPageTableFrame {
Child::PageTable(RawPageTableNode {
raw: paddr,
level: self.level() - 1,
_phantom: PhantomData,
@ -269,7 +269,7 @@ where
} else if tracked {
let page = unsafe { Page::<FrameMeta>::restore(paddr) };
core::mem::forget(page.clone());
Child::Frame(VmFrame { page })
Child::Frame(Frame { page })
} else {
Child::Untracked(paddr)
}
@ -335,7 +335,7 @@ where
pub(super) fn set_child_pt(
&mut self,
idx: usize,
pt: RawPageTableFrame<E, C>,
pt: RawPageTableNode<E, C>,
in_untracked_range: bool,
) {
// They should be ensured by the cursor.
@ -348,7 +348,7 @@ where
}
/// Map a frame at a given index.
pub(super) fn set_child_frame(&mut self, idx: usize, frame: VmFrame, prop: PageProperty) {
pub(super) fn set_child_frame(&mut self, idx: usize, frame: Frame, prop: PageProperty) {
// They should be ensured by the cursor.
debug_assert!(idx < nr_subpage_per_huge::<C>());
debug_assert_eq!(frame.level(), self.level());
@ -391,7 +391,7 @@ where
panic!("`split_untracked_huge` not called on an untracked huge page");
};
let prop = self.read_pte_prop(idx);
let mut new_frame = PageTableFrame::<E, C>::alloc(self.level() - 1);
let mut new_frame = PageTableNode::<E, C>::alloc(self.level() - 1);
for i in 0..nr_subpage_per_huge::<C>() {
let small_pa = pa + i * page_size::<C>(self.level() - 1);
unsafe { new_frame.set_child_untracked(i, small_pa, prop) };
@ -467,7 +467,7 @@ where
}
}
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableFrame<E, C>
impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for PageTableNode<E, C>
where
[(); C::NR_LEVELS as usize]:,
{
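The raw/locked split above is a lock-by-conversion pattern: `lock()` consumes a `RawPageTableNode` and spins on the per-node lock byte, while `into_raw()` releases it and hands ownership back to a PTE or the CPU. A toy model of just that locking handshake (the reference counting via `ManuallyDrop`/`forget` that the real code layers on top is omitted):

use std::sync::atomic::{AtomicU8, Ordering};

struct Slot { lock: AtomicU8 }
struct RawHandle<'a> { slot: &'a Slot }
struct LockedHandle<'a> { slot: &'a Slot }

impl<'a> RawHandle<'a> {
    fn lock(self) -> LockedHandle<'a> {
        // Spin until the lock byte goes from 0 (free) to 1 (held).
        while self.slot.lock
            .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            std::hint::spin_loop();
        }
        LockedHandle { slot: self.slot }
    }
}

impl<'a> LockedHandle<'a> {
    fn into_raw(self) -> RawHandle<'a> {
        // Release the lock, as `into_raw` does above with `store(0, Release)`.
        self.slot.lock.store(0, Ordering::Release);
        RawHandle { slot: self.slot }
    }
}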

View File

@ -18,7 +18,7 @@ pub(crate) use cursor::{Cursor, CursorMut, PageTableQueryResult};
#[cfg(ktest)]
mod test;
pub(in crate::vm) mod boot_pt;
pub(in crate::mm) mod boot_pt;
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PageTableError {
@ -82,7 +82,7 @@ pub(crate) struct PageTable<
> where
[(); C::NR_LEVELS as usize]:,
{
root: RawPageTableFrame<E, C>,
root: RawPageTableNode<E, C>,
_phantom: PhantomData<M>,
}
@ -160,7 +160,7 @@ impl PageTable<KernelMode> {
let mut root_frame = self.root.copy_handle().lock();
for i in start..end {
if !root_frame.read_pte(i).is_present() {
let frame = PageTableFrame::alloc(PagingConsts::NR_LEVELS - 1);
let frame = PageTableNode::alloc(PagingConsts::NR_LEVELS - 1);
root_frame.set_child_pt(i, frame.into_raw(), i < NR_PTES_PER_NODE * 3 / 4);
}
}
@ -174,7 +174,7 @@ where
/// Create a new empty page table. Useful for the kernel page table and IOMMU page tables only.
pub(crate) fn empty() -> Self {
PageTable {
root: PageTableFrame::<E, C>::alloc(C::NR_LEVELS).into_raw(),
root: PageTableNode::<E, C>::alloc(C::NR_LEVELS).into_raw(),
_phantom: PhantomData,
}
}

View File

@ -3,7 +3,7 @@
use core::mem::ManuallyDrop;
use super::*;
use crate::vm::{
use crate::mm::{
kspace::LINEAR_MAPPING_BASE_VADDR,
page_prop::{CachePolicy, PageFlags},
VmAllocOptions,
@ -43,7 +43,7 @@ fn test_tracked_map_unmap() {
#[ktest]
fn test_untracked_map_unmap() {
let pt = PageTable::<KernelMode>::empty();
const UNTRACKED_OFFSET: usize = crate::vm::kspace::LINEAR_MAPPING_BASE_VADDR;
const UNTRACKED_OFFSET: usize = crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;
let from_ppn = 13245..512 * 512 + 23456;
let to_ppn = from_ppn.start - 11010..from_ppn.end - 11010;
@ -172,7 +172,7 @@ impl PagingConstsTrait for VeryHugePagingConsts {
#[ktest]
fn test_untracked_large_protect_query() {
let pt = PageTable::<KernelMode, PageTableEntry, VeryHugePagingConsts>::empty();
const UNTRACKED_OFFSET: usize = crate::vm::kspace::LINEAR_MAPPING_BASE_VADDR;
const UNTRACKED_OFFSET: usize = crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;
let gmult = 512 * 512;
let from_ppn = gmult - 512..gmult + gmult + 514;

View File

@ -13,11 +13,11 @@ use crate::{
arch::mm::{
tlb_flush_addr_range, tlb_flush_all_excluding_global, PageTableEntry, PagingConsts,
},
prelude::*,
vm::{
mm::{
page_table::{Cursor, PageTableQueryResult as PtQr},
VmFrame, MAX_USERSPACE_VADDR,
Frame, MAX_USERSPACE_VADDR,
},
prelude::*,
Error,
};
@ -31,7 +31,7 @@ use crate::{
///
/// A newly-created `VmSpace` is not backed by any physical memory pages.
/// To provide memory pages for a `VmSpace`, one can allocate and map
/// physical memory (`VmFrame`s) to the `VmSpace`.
/// physical memory (`Frame`s) to the `VmSpace`.
#[derive(Debug)]
pub struct VmSpace {
pt: PageTable<UserMode>,
@ -103,7 +103,7 @@ impl VmSpace {
};
for frame in frames.into_iter() {
// SAFETY: mapping in the user space with `VmFrame` is safe.
// SAFETY: mapping in the user space with `Frame` is safe.
unsafe {
cursor.map(frame, prop);
}
@ -299,7 +299,7 @@ pub enum VmQueryResult {
},
Mapped {
va: Vaddr,
frame: VmFrame,
frame: Frame,
prop: PageProperty,
},
}
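A user-space mapping sketch tying the types together. `VmSpace::new()` and the exact `map` signature are assumptions here (the hunks show only part of `map`'s body and the `VmMapOptions` re-export), so treat this as illustrative:

use crate::mm::{VmAllocOptions, VmFrameVec, VmMapOptions, VmSpace};

fn vmspace_sketch() {
    let space = VmSpace::new(); // assumed constructor
    let mut frames = VmFrameVec::empty();
    frames.push(VmAllocOptions::new(1).alloc_single().unwrap());

    // Map the frames into the user address space with default options.
    let options = VmMapOptions::new(); // assumed; only the type name appears above
    let _va = space.map(frames, &options).unwrap();
}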

View File

@ -11,6 +11,6 @@ pub use aster_main::aster_main;
pub use crate::{
early_print as print, early_println as println,
mm::{Paddr, Vaddr},
panicking::abort,
vm::{Paddr, Vaddr},
};

View File

@ -12,10 +12,10 @@ use super::{
pub(crate) use crate::arch::task::{context_switch, TaskContext};
use crate::{
cpu::CpuSet,
mm::{kspace::KERNEL_PAGE_TABLE, PageFlags, Segment, VmAllocOptions, PAGE_SIZE},
prelude::*,
sync::{SpinLock, SpinLockGuard},
user::UserSpace,
vm::{kspace::KERNEL_PAGE_TABLE, PageFlags, VmAllocOptions, VmSegment, PAGE_SIZE},
};
pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;
@ -35,7 +35,7 @@ pub trait TaskContextApi {
}
pub struct KernelStack {
segment: VmSegment,
segment: Segment,
has_guard_page: bool,
}
@ -56,7 +56,7 @@ impl KernelStack {
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let guard_page_vaddr = {
let guard_page_paddr = stack_segment.start_paddr();
crate::vm::paddr_to_vaddr(guard_page_paddr)
crate::mm::paddr_to_vaddr(guard_page_paddr)
};
// SAFETY: the segment allocated is not used by others so we can protect it.
unsafe {
@ -84,7 +84,7 @@ impl Drop for KernelStack {
let page_table = KERNEL_PAGE_TABLE.get().unwrap();
let guard_page_vaddr = {
let guard_page_paddr = self.segment.start_paddr();
crate::vm::paddr_to_vaddr(guard_page_paddr)
crate::mm::paddr_to_vaddr(guard_page_paddr)
};
// SAFETY: the segment allocated is not used by others so we can protect it.
unsafe {
@ -293,7 +293,7 @@ impl TaskOptions {
// to at least 16 bytes. And a larger alignment is needed if larger arguments
// are passed to the function. The `kernel_task_entry` function does not
// have any arguments, so we only need to align the stack pointer to 16 bytes.
ctx.set_stack_pointer(crate::vm::paddr_to_vaddr(new_task.kstack.end_paddr() - 16));
ctx.set_stack_pointer(crate::mm::paddr_to_vaddr(new_task.kstack.end_paddr() - 16));
Ok(Arc::new(new_task))
}
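The stack layout implied above: a `KERNEL_STACK_SIZE` segment whose first page is protected as a guard page, with the initial stack pointer placed 16 bytes below the top for alignment. A quick sketch of the arithmetic, assuming 4 KiB pages and that the guard page counts toward the 64 pages:

const PAGE_SIZE: usize = 4096;
const KERNEL_STACK_SIZE: usize = PAGE_SIZE * 64;

fn main() {
    let stack_base: usize = 0x10_0000; // hypothetical segment start address
    let guard_page = stack_base..stack_base + PAGE_SIZE; // protected; catches overflow
    let initial_sp = stack_base + KERNEL_STACK_SIZE - 16; // 16-byte aligned top

    assert_eq!(initial_sp % 16, 0);
    assert!(guard_page.end < initial_sp);
}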

View File

@ -4,7 +4,7 @@
use trapframe::TrapFrame;
use crate::{cpu::UserContext, prelude::*, task::Task, vm::VmSpace};
use crate::{cpu::UserContext, mm::VmSpace, prelude::*, task::Task};
/// A user space.
///