From 983a6af3cc6f4360f98255ecf4dd1f0ccbf2a1b3 Mon Sep 17 00:00:00 2001 From: Zhang Junyang Date: Wed, 25 Dec 2024 22:53:24 +0800 Subject: [PATCH] Polish the doc and rename variables in `ostd::mm` --- kernel/comps/block/src/bio.rs | 8 +- kernel/comps/virtio/src/queue.rs | 2 +- kernel/libs/aster-util/src/safe_ptr.rs | 2 +- kernel/libs/aster-util/src/segment_slice.rs | 32 +-- kernel/src/fs/exfat/fs.rs | 6 +- kernel/src/fs/exfat/inode.rs | 6 +- kernel/src/fs/ext2/block_group.rs | 6 +- kernel/src/fs/ext2/fs.rs | 4 +- kernel/src/fs/ext2/inode.rs | 12 +- kernel/src/fs/ext2/prelude.rs | 2 +- kernel/src/fs/ramfs/fs.rs | 6 +- kernel/src/fs/utils/page_cache.rs | 16 +- kernel/src/vdso.rs | 6 +- kernel/src/vm/util.rs | 4 +- kernel/src/vm/vmar/vm_mapping.rs | 12 +- kernel/src/vm/vmo/dyn_cap.rs | 8 +- kernel/src/vm/vmo/mod.rs | 38 ++-- kernel/src/vm/vmo/options.rs | 6 +- kernel/src/vm/vmo/pager.rs | 6 +- kernel/src/vm/vmo/static_cap.rs | 8 +- .../lib.rs | 2 +- .../x86/iommu/interrupt_remapping/table.rs | 2 +- ostd/src/mm/dma/dma_coherent.rs | 11 +- ostd/src/mm/dma/dma_stream.rs | 12 +- ostd/src/mm/frame/allocator.rs | 21 +- ostd/src/mm/frame/meta.rs | 145 +++++++------- ostd/src/mm/frame/mod.rs | 184 ++++++++++-------- ostd/src/mm/frame/segment.rs | 131 ++++++------- ostd/src/mm/frame/untyped.rs | 56 +++--- ostd/src/mm/heap_allocator/mod.rs | 4 +- ostd/src/mm/io.rs | 10 +- ostd/src/mm/kspace/kvirt_area.rs | 6 +- ostd/src/mm/kspace/mod.rs | 2 +- ostd/src/mm/mod.rs | 6 +- ostd/src/mm/page_table/boot_pt.rs | 8 +- ostd/src/mm/page_table/cursor.rs | 12 +- ostd/src/mm/page_table/node/child.rs | 12 +- ostd/src/mm/page_table/node/mod.rs | 8 +- ostd/src/mm/stat/mod.rs | 6 +- ostd/src/mm/tlb.rs | 8 +- ostd/src/mm/vm_space.rs | 8 +- 41 files changed, 430 insertions(+), 414 deletions(-) diff --git a/kernel/comps/block/src/bio.rs b/kernel/comps/block/src/bio.rs index f18759e5b..be12fb923 100644 --- a/kernel/comps/block/src/bio.rs +++ b/kernel/comps/block/src/bio.rs @@ -5,7 +5,7 @@ use bitvec::array::BitArray; use int_to_c_enum::TryFromInt; use ostd::{ mm::{ - DmaDirection, DmaStream, DmaStreamSlice, DynUSegment, FrameAllocOptions, Infallible, VmIo, + DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, USegment, VmIo, VmReader, VmWriter, }, sync::{SpinLock, WaitQueue}, @@ -442,8 +442,8 @@ impl<'a> BioSegment { } } - /// Constructs a new `BioSegment` with a given `DynUSegment` and the bio direction. - pub fn new_from_segment(segment: DynUSegment, direction: BioDirection) -> Self { + /// Constructs a new `BioSegment` with a given `USegment` and the bio direction. + pub fn new_from_segment(segment: USegment, direction: BioDirection) -> Self { let len = segment.size(); let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap(); Self { @@ -481,7 +481,7 @@ impl<'a> BioSegment { /// Returns the inner VM segment. 
     #[cfg(ktest)]
-    pub fn inner_segment(&self) -> &DynUSegment {
+    pub fn inner_segment(&self) -> &USegment {
         self.inner.dma_slice.stream().segment()
     }
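[Illustration, not part of the patch: how the renamed `USegment` flows into `BioSegment::new_from_segment` above. A `Segment<()>` from the frame allocator converts into a `USegment` via `.into()`, as the allocator hunks later in this patch show; error handling is elided.]

    // Allocate four contiguous untyped frames and wrap them in a
    // `BioSegment` for a device-to-memory transfer.
    let segment: USegment = FrameAllocOptions::new().alloc_segment(4)?.into();
    let bio_seg = BioSegment::new_from_segment(segment, BioDirection::FromDevice);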
diff --git a/kernel/comps/virtio/src/queue.rs b/kernel/comps/virtio/src/queue.rs
index 6112ac10c..41401b75e 100644
--- a/kernel/comps/virtio/src/queue.rs
+++ b/kernel/comps/virtio/src/queue.rs
@@ -76,7 +76,7 @@ impl VirtQueue {
         }

         let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
-            // Currently, we use one DynUFrame to place the descriptors and available rings, one DynUFrame to place used rings
+            // Currently, we use one UFrame to place the descriptors and available rings, and one UFrame to place the used rings
             // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
             if size > 128 {
                 return Err(QueueError::InvalidArgs);
diff --git a/kernel/libs/aster-util/src/safe_ptr.rs b/kernel/libs/aster-util/src/safe_ptr.rs
index 89b1c26ff..594169121 100644
--- a/kernel/libs/aster-util/src/safe_ptr.rs
+++ b/kernel/libs/aster-util/src/safe_ptr.rs
@@ -54,7 +54,7 @@ use ostd::{
 ///
 /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo`
 /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and
-/// `DynUFrame`. The blanket implementations of `VmIo` also include pointer-like
+/// `UFrame`. The blanket implementations of `VmIo` also include pointer-like
 /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmo>`,
 /// and `Arc<Vmo>`.
 ///
diff --git a/kernel/libs/aster-util/src/segment_slice.rs b/kernel/libs/aster-util/src/segment_slice.rs
index e0971b97d..7dca0b772 100644
--- a/kernel/libs/aster-util/src/segment_slice.rs
+++ b/kernel/libs/aster-util/src/segment_slice.rs
@@ -2,41 +2,41 @@
 // SPDX-License-Identifier: MPL-2.0

-//! Provides [`SegmentSlice`] for quick duplication and slicing over [`DynUSegment`].
+//! Provides [`SegmentSlice`] for quick duplication and slicing over [`USegment`].

 use alloc::sync::Arc;
 use core::ops::Range;

 use ostd::{
     mm::{
-        DynUFrame, DynUSegment, FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedMem,
-        VmIo, VmReader, VmWriter, PAGE_SIZE,
+        FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UFrame, USegment, UntypedMem, VmIo,
+        VmReader, VmWriter, PAGE_SIZE,
     },
     Error, Result,
 };

-/// A reference to a slice of a [`DynUSegment`].
+/// A reference to a slice of a [`USegment`].
 ///
 /// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference
-/// count. While cloning a [`DynUSegment`] will increment the reference count of
+/// count, while cloning a [`USegment`] increments the reference count of
 /// many underlying pages.
 ///
 /// The downside is that the [`SegmentSlice`] requires heap allocation. Also,
-/// if any [`SegmentSlice`] of the original [`DynUSegment`] is alive, all pages in
-/// the original [`DynUSegment`], including the pages that are not referenced, will
+/// if any [`SegmentSlice`] of the original [`USegment`] is alive, all pages in
+/// the original [`USegment`], including the pages that are not referenced, will
 /// not be freed.
 #[derive(Debug, Clone)]
 pub struct SegmentSlice {
-    inner: Arc<DynUSegment>,
+    inner: Arc<USegment>,
     range: Range<usize>,
 }

 impl SegmentSlice {
-    /// Returns a part of the `DynUSegment`.
+    /// Returns a part of the `USegment`.
     ///
     /// # Panics
     ///
-    /// If `range` is not within the range of this `DynUSegment`,
+    /// If `range` is not within the range of this `USegment`,
     /// then the method panics.
     pub fn range(&self, range: Range<usize>) -> Self {
         let orig_range = &self.range;
@@ -124,8 +124,8 @@ impl VmIo for SegmentSlice {
     }
 }

-impl From<DynUSegment> for SegmentSlice {
-    fn from(segment: DynUSegment) -> Self {
+impl From<USegment> for SegmentSlice {
+    fn from(segment: USegment) -> Self {
         let range = 0..segment.size() / PAGE_SIZE;
         Self {
             inner: Arc::new(segment),
@@ -134,7 +134,7 @@ impl From<DynUSegment> for SegmentSlice {
     }
 }

-impl From<SegmentSlice> for DynUSegment {
+impl From<SegmentSlice> for USegment {
     fn from(slice: SegmentSlice) -> Self {
         let start = slice.range.start * PAGE_SIZE;
         let end = slice.range.end * PAGE_SIZE;
@@ -142,8 +142,8 @@ impl From<SegmentSlice> for DynUSegment {
     }
 }

-impl From<DynUFrame> for SegmentSlice {
-    fn from(frame: DynUFrame) -> Self {
-        SegmentSlice::from(DynUSegment::from(frame))
+impl From<UFrame> for SegmentSlice {
+    fn from(frame: UFrame) -> Self {
+        SegmentSlice::from(USegment::from(frame))
     }
 }
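[Illustration, not part of the patch: intended use of the `USegment`-backed `SegmentSlice`. Indices passed to `range` are in pages, matching the `From<USegment>` impl above; `segment` is assumed to be a `USegment` at hand.]

    let slice = SegmentSlice::from(segment);
    let head = slice.range(0..2);    // first two pages of the segment
    let cloned = head.clone();       // cheap: bumps a single Arc count
    // Converting back pins every frame of the original segment:
    let seg_again: USegment = head.into();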
diff --git a/kernel/src/fs/exfat/fs.rs b/kernel/src/fs/exfat/fs.rs
index fc9313209..979ae8709 100644
--- a/kernel/src/fs/exfat/fs.rs
+++ b/kernel/src/fs/exfat/fs.rs
@@ -12,7 +12,7 @@ use aster_block::{
 };
 use hashbrown::HashMap;
 use lru::LruCache;
-use ostd::mm::DynUFrame;
+use ostd::mm::UFrame;
 pub(super) use ostd::mm::VmIo;

 use super::{
@@ -368,7 +368,7 @@ impl ExfatFS {
 }

 impl PageCacheBackend for ExfatFS {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid read size")
         }
@@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS {
         Ok(waiter)
     }

-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid write size")
         }
diff --git a/kernel/src/fs/exfat/inode.rs b/kernel/src/fs/exfat/inode.rs
index d1497fc2f..922cfcf98 100644
--- a/kernel/src/fs/exfat/inode.rs
+++ b/kernel/src/fs/exfat/inode.rs
@@ -13,7 +13,7 @@ use aster_block::{
     BLOCK_SIZE,
 };
 use aster_rights::Full;
-use ostd::mm::{DynUFrame, VmIo};
+use ostd::mm::{UFrame, VmIo};

 use super::{
     constants::*,
@@ -135,7 +135,7 @@ struct ExfatInodeInner {
 }

 impl PageCacheBackend for ExfatInode {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let inner = self.inner.read();
         if inner.size < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "Invalid read size")
@@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode {
         Ok(waiter)
     }

-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let inner = self.inner.read();
         let sector_size = inner.fs().sector_size();
diff --git a/kernel/src/fs/ext2/block_group.rs b/kernel/src/fs/ext2/block_group.rs
index fa2c6c4d0..ebf65e787 100644
--- a/kernel/src/fs/ext2/block_group.rs
+++ b/kernel/src/fs/ext2/block_group.rs
@@ -28,7 +28,7 @@ struct BlockGroupImpl {

 impl BlockGroup {
     /// Loads and constructs a block group.
     pub fn load(
-        group_descriptors_segment: &DynUSegment,
+        group_descriptors_segment: &USegment,
         idx: usize,
         block_device: &dyn BlockDevice,
         super_block: &SuperBlock,
@@ -318,7 +318,7 @@ impl Debug for BlockGroup {
 }

 impl PageCacheBackend for BlockGroupImpl {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment =
             BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice);
@@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl {
             .read_blocks_async(bid, bio_segment)
     }

-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment =
             BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice);
diff --git a/kernel/src/fs/ext2/fs.rs b/kernel/src/fs/ext2/fs.rs
index a8c36b25c..5a3e31a47 100644
--- a/kernel/src/fs/ext2/fs.rs
+++ b/kernel/src/fs/ext2/fs.rs
@@ -23,7 +23,7 @@ pub struct Ext2 {
     blocks_per_group: Ext2Bid,
     inode_size: usize,
     block_size: usize,
-    group_descriptors_segment: DynUSegment,
+    group_descriptors_segment: USegment,
     self_ref: Weak<Self>,
 }

@@ -63,7 +63,7 @@ impl Ext2 {
         // Load the block groups information
         let load_block_groups = |fs: Weak<Ext2>,
                                  block_device: &dyn BlockDevice,
-                                 group_descriptors_segment: &DynUSegment|
+                                 group_descriptors_segment: &USegment|
          -> Result<Vec<BlockGroup>> {
             let block_groups_count = super_block.block_groups_count() as usize;
             let mut block_groups = Vec::with_capacity(block_groups_count);
diff --git a/kernel/src/fs/ext2/inode.rs b/kernel/src/fs/ext2/inode.rs
index 3ee65f555..5c71ac4eb 100644
--- a/kernel/src/fs/ext2/inode.rs
+++ b/kernel/src/fs/ext2/inode.rs
@@ -1733,7 +1733,7 @@ impl InodeImpl {
         writer: &mut VmWriter,
     ) -> Result<BioWaiter>;
     pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>;
-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter>;
     pub fn write_blocks_async(
         &self,
         bid: Ext2Bid,
@@ -1741,7 +1741,7 @@ impl InodeImpl {
         reader: &mut VmReader,
     ) -> Result<BioWaiter>;
     pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>;
-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter>;
 }

 /// Manages the inode blocks and block I/O operations.
@@ -1789,7 +1789,7 @@ impl InodeBlockManager {
         }
     }

-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter> {
         let mut bio_waiter = BioWaiter::new();

         for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@@ -1834,7 +1834,7 @@ impl InodeBlockManager {
         }
     }

-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter> {
         let mut bio_waiter = BioWaiter::new();

         for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@@ -1858,12 +1858,12 @@ impl InodeBlockManager {
 }

 impl PageCacheBackend for InodeBlockManager {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
         self.read_block_async(bid, frame)
     }

-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
         self.write_block_async(bid, frame)
     }
diff --git a/kernel/src/fs/ext2/prelude.rs b/kernel/src/fs/ext2/prelude.rs
index 588d7ff9c..ce780a61d 100644
--- a/kernel/src/fs/ext2/prelude.rs
+++ b/kernel/src/fs/ext2/prelude.rs
@@ -13,7 +13,7 @@ pub(super) use aster_block::{
 };
 pub(super) use aster_rights::Full;
 pub(super) use ostd::{
-    mm::{DynUFrame, DynUSegment, Frame, FrameAllocOptions, Segment, VmIo},
+    mm::{Frame, FrameAllocOptions, Segment, UFrame, USegment, VmIo},
     sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
 };
 pub(super) use static_assertions::const_assert;
diff --git a/kernel/src/fs/ramfs/fs.rs b/kernel/src/fs/ramfs/fs.rs
index f4569f7dc..f14cd266e 100644
--- a/kernel/src/fs/ramfs/fs.rs
+++ b/kernel/src/fs/ramfs/fs.rs
@@ -11,7 +11,7 @@ use aster_rights::Full;
 use aster_util::slot_vec::SlotVec;
 use hashbrown::HashMap;
 use ostd::{
-    mm::{DynUFrame, UntypedMem, VmIo},
+    mm::{UFrame, UntypedMem, VmIo},
     sync::{PreemptDisabled, RwLockWriteGuard},
 };

@@ -484,7 +484,7 @@ impl RamInode {
 }

 impl PageCacheBackend for RamInode {
-    fn read_page_async(&self, _idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, _idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         // Initially, any block/page in a RamFs inode contains all zeros
         frame
             .writer()
             .fill(0);
         Ok(BioWaiter::new())
     }

-    fn write_page_async(&self, _idx: usize, _frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, _idx: usize, _frame: &UFrame) -> Result<BioWaiter> {
         // do nothing
         Ok(BioWaiter::new())
     }
diff --git a/kernel/src/fs/utils/page_cache.rs b/kernel/src/fs/utils/page_cache.rs
index 372cb5e10..0689df2a5 100644
--- a/kernel/src/fs/utils/page_cache.rs
+++ b/kernel/src/fs/utils/page_cache.rs
@@ -14,7 +14,7 @@ use aster_rights::Full;
 use lru::LruCache;
 use ostd::{
     impl_untyped_frame_meta_for,
-    mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem, VmIo},
+    mm::{Frame, FrameAllocOptions, UFrame, UntypedMem, VmIo},
 };

 use crate::{
@@ -388,7 +388,7 @@ impl PageCacheManager {
         Ok(())
     }

-    fn ondemand_readahead(&self, idx: usize) -> Result<DynUFrame> {
+    fn ondemand_readahead(&self, idx: usize) -> Result<UFrame> {
         let mut pages = self.pages.lock();
         let mut ra_state = self.ra_state.lock();
         let backend = self.backend();
@@ -445,7 +445,7 @@ impl Debug for PageCacheManager {
 }

 impl Pager for PageCacheManager {
-    fn commit_page(&self, idx: usize) -> Result<DynUFrame> {
+    fn commit_page(&self, idx: usize) -> Result<UFrame> {
         self.ondemand_readahead(idx)
     }

@@ -476,7 +476,7 @@ impl Pager for PageCacheManager {
         Ok(())
     }

-    fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame> {
+    fn commit_overwrite(&self, idx: usize) -> Result<UFrame> {
         if let Some(page) = self.pages.lock().get(&idx) {
             return Ok(page.clone().into());
         }
@@ -573,16 +573,16 @@ impl AtomicPageState {
 /// This trait represents the backend for the page cache.
 pub trait PageCacheBackend: Sync + Send {
     /// Reads a page from the backend asynchronously.
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter>;
     /// Writes a page to the backend asynchronously.
-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter>;
     /// Returns the number of pages in the backend.
     fn npages(&self) -> usize;
 }

 impl dyn PageCacheBackend {
     /// Reads a page from the backend synchronously.
-    fn read_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
+    fn read_page(&self, idx: usize, frame: &UFrame) -> Result<()> {
         let waiter = self.read_page_async(idx, frame)?;
         match waiter.wait() {
             Some(BioStatus::Complete) => Ok(()),
@@ -590,7 +590,7 @@ impl dyn PageCacheBackend {
     }
     /// Writes a page to the backend synchronously.
-    fn write_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
+    fn write_page(&self, idx: usize, frame: &UFrame) -> Result<()> {
         let waiter = self.write_page_async(idx, frame)?;
         match waiter.wait() {
             Some(BioStatus::Complete) => Ok(()),
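[Illustration, not part of the patch: a minimal `PageCacheBackend` implementor under the new `UFrame` signatures, modeled on the `RamInode` backend earlier in this patch. The `ZeroBackend` type is hypothetical.]

    struct ZeroBackend;

    impl PageCacheBackend for ZeroBackend {
        fn read_page_async(&self, _idx: usize, frame: &UFrame) -> Result<BioWaiter> {
            // Untyped memory can be written directly through a writer.
            frame.writer().fill(0);
            Ok(BioWaiter::new())
        }
        fn write_page_async(&self, _idx: usize, _frame: &UFrame) -> Result<BioWaiter> {
            // Nothing to persist.
            Ok(BioWaiter::new())
        }
        fn npages(&self) -> usize {
            usize::MAX
        }
    }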
diff --git a/kernel/src/vdso.rs b/kernel/src/vdso.rs
index ddcdc8de8..f79d1ea0b 100644
--- a/kernel/src/vdso.rs
+++ b/kernel/src/vdso.rs
@@ -21,7 +21,7 @@ use aster_rights::Rights;
 use aster_time::{read_monotonic_time, Instant};
 use aster_util::coeff::Coeff;
 use ostd::{
-    mm::{DynUFrame, VmIo, PAGE_SIZE},
+    mm::{UFrame, VmIo, PAGE_SIZE},
     sync::SpinLock,
     Pod,
 };
@@ -199,9 +199,9 @@ struct Vdso {
     data: SpinLock<VdsoData>,
     /// The VMO of the entire VDSO, including the library text and the VDSO data.
     vmo: Arc<Vmo>,
-    /// The `DynUFrame` that contains the VDSO data. This frame is contained in and
+    /// The `UFrame` that contains the VDSO data. This frame is contained in and
     /// will not be removed from the VDSO VMO.
-    data_frame: DynUFrame,
+    data_frame: UFrame,
 }

 /// A `SpinLock` for the `seq` field in `VdsoData`.
diff --git a/kernel/src/vm/util.rs b/kernel/src/vm/util.rs
index 26983fb01..ceba2f48f 100644
--- a/kernel/src/vm/util.rs
+++ b/kernel/src/vm/util.rs
@@ -1,13 +1,13 @@
 // SPDX-License-Identifier: MPL-2.0

-use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem};
+use ostd::mm::{Frame, FrameAllocOptions, UFrame, UntypedMem};

 use crate::prelude::*;

 /// Creates a new `Frame<()>` and initializes it with the contents of the `src`.
 ///
 /// Note that it only duplicates the contents not the metadata.
-pub fn duplicate_frame(src: &DynUFrame) -> Result<Frame<()>> {
+pub fn duplicate_frame(src: &UFrame) -> Result<Frame<()>> {
     let new_frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
     new_frame.writer().write(&mut src.reader());
     Ok(new_frame)
 }
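[Illustration, not part of the patch: the typical copy-on-write call site of `duplicate_frame`. `src` is assumed to be the `UFrame` currently mapped read-only; the copy converts back into a `UFrame` for remapping with write permissions.]

    // Break the share: copy the contents, then remap `copy` writable.
    let copy: UFrame = duplicate_frame(&src)?.into();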
diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs
index 7bb56b37d..d02263363 100644
--- a/kernel/src/vm/vmar/vm_mapping.rs
+++ b/kernel/src/vm/vmar/vm_mapping.rs
@@ -8,8 +8,8 @@ use core::{

 use align_ext::AlignExt;
 use ostd::mm::{
-    tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, DynUFrame, FrameAllocOptions, PageFlags,
-    PageProperty, VmSpace,
+    tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
+    UFrame, VmSpace,
 };

 use super::interval_set::Interval;
@@ -216,7 +216,7 @@ impl VmMapping {
         Ok(())
     }

-    fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(DynUFrame, bool)> {
+    fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UFrame, bool)> {
         let mut is_readonly = false;
         let Some(vmo) = &self.vmo else {
             return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly));
@@ -264,7 +264,7 @@ impl VmMapping {
         let vm_perms = self.perms - VmPerms::WRITE;

         let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;

-        let operate = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
+        let operate = move |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
             if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
                 // We regard all the surrounding pages as accessed, no matter
                 // if it is really so. Then the hardware won't bother to update
@@ -432,7 +432,7 @@ impl MappedVmo {
     ///
     /// If the VMO has not committed a frame at this index, it will commit
     /// one first and return it.
-    fn get_committed_frame(&self, page_offset: usize) -> Result<DynUFrame> {
+    fn get_committed_frame(&self, page_offset: usize) -> Result<UFrame> {
         debug_assert!(page_offset < self.range.len());
         debug_assert!(page_offset % PAGE_SIZE == 0);
         self.vmo.commit_page(self.range.start + page_offset)
@@ -444,7 +444,7 @@ impl MappedVmo {
     /// perform other operations.
     fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         debug_assert!(range.start < self.range.len());
         debug_assert!(range.end <= self.range.len());
diff --git a/kernel/src/vm/vmo/dyn_cap.rs b/kernel/src/vm/vmo/dyn_cap.rs
index bb72cd741..3a4f0619d 100644
--- a/kernel/src/vm/vmo/dyn_cap.rs
+++ b/kernel/src/vm/vmo/dyn_cap.rs
@@ -3,14 +3,14 @@ use core::ops::Range;

 use aster_rights::{Rights, TRights};
-use ostd::mm::{DynUFrame, VmIo};
+use ostd::mm::{UFrame, VmIo};

 use super::{CommitFlags, Vmo, VmoRightsOp};
 use crate::prelude::*;

 impl Vmo<Rights> {
     /// Commits a page at specific offset
-    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<UFrame> {
         self.check_rights(Rights::WRITE)?;
         self.0.commit_page(offset)
     }
@@ -39,7 +39,7 @@ impl Vmo<Rights> {
     /// perform other operations.
     pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         self.check_rights(Rights::WRITE)?;
         self.0
             .operate_on_range(range, operate, CommitFlags::empty())
@@ -112,7 +112,7 @@ impl Vmo<Rights> {
     /// # Access rights
     ///
     /// The method requires the Write right.
-    pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
+    pub fn replace(&self, page: UFrame, page_idx: usize) -> Result<()> {
         self.check_rights(Rights::WRITE)?;
         self.0.replace(page, page_idx)
     }
diff --git a/kernel/src/vm/vmo/mod.rs b/kernel/src/vm/vmo/mod.rs
index 708d55542..ee5724f74 100644
--- a/kernel/src/vm/vmo/mod.rs
+++ b/kernel/src/vm/vmo/mod.rs
@@ -11,7 +11,7 @@ use align_ext::AlignExt;
 use aster_rights::Rights;
 use ostd::{
     collections::xarray::{CursorMut, XArray},
-    mm::{DynUFrame, FrameAllocOptions, UntypedMem, VmReader, VmWriter},
+    mm::{FrameAllocOptions, UFrame, UntypedMem, VmReader, VmWriter},
 };

 use crate::prelude::*;
@@ -66,8 +66,8 @@ pub use pager::Pager;
 /// # Implementation
 ///
 /// `Vmo` provides high-level APIs for address space management by wrapping
-/// around its low-level counterpart [`ostd::mm::DynUFrame`].
-/// Compared with `DynUFrame`,
+/// around its low-level counterpart [`ostd::mm::UFrame`].
+/// Compared with `UFrame`,
 /// `Vmo` is easier to use (by offering more powerful APIs) and
 /// harder to misuse (thanks to its nature of being capability).
 #[derive(Debug)]
@@ -125,12 +125,12 @@ bitflags! {
     }
 }

-/// `Pages` is the struct that manages the `DynUFrame`s stored in `Vmo_`.
+/// `Pages` is the struct that manages the `UFrame`s stored in `Vmo_`.
 pub(super) enum Pages {
     /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
-    Nonresizable(Mutex<XArray<DynUFrame>>, usize),
+    Nonresizable(Mutex<XArray<UFrame>>, usize),
     /// `Pages` that can be resized and have a variable size.
-    Resizable(Mutex<(XArray<DynUFrame>, usize)>),
+    Resizable(Mutex<(XArray<UFrame>, usize)>),
 }

 impl Clone for Pages {
@@ -149,7 +149,7 @@ impl Clone for Pages {
 impl Pages {
     fn with<F, R>(&self, func: F) -> R
     where
-        F: FnOnce(&mut XArray<DynUFrame>, usize) -> R,
+        F: FnOnce(&mut XArray<UFrame>, usize) -> R,
     {
         match self {
             Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
@@ -201,16 +201,16 @@ impl CommitFlags {
 }

 impl Vmo_ {
-    /// Prepares a new `DynUFrame` for the target index in pages, returns this new frame.
-    fn prepare_page(&self, page_idx: usize) -> Result<DynUFrame> {
+    /// Prepares a new `UFrame` for the target index in pages, returns this new frame.
+    fn prepare_page(&self, page_idx: usize) -> Result<UFrame> {
         match &self.pager {
             None => Ok(FrameAllocOptions::new().alloc_frame()?.into()),
             Some(pager) => pager.commit_page(page_idx),
         }
     }

-    /// Prepares a new `DynUFrame` for the target index in the VMO, returns this new frame.
-    fn prepare_overwrite(&self, page_idx: usize) -> Result<DynUFrame> {
+    /// Prepares a new `UFrame` for the target index in the VMO, returns this new frame.
+    fn prepare_overwrite(&self, page_idx: usize) -> Result<UFrame> {
         if let Some(pager) = &self.pager {
             pager.commit_overwrite(page_idx)
         } else {
@@ -220,9 +220,9 @@ impl Vmo_ {

     fn commit_with_cursor(
         &self,
-        cursor: &mut CursorMut<'_, DynUFrame>,
+        cursor: &mut CursorMut<'_, UFrame>,
         commit_flags: CommitFlags,
-    ) -> Result<DynUFrame> {
+    ) -> Result<UFrame> {
         let new_page = {
             if let Some(committed_page) = cursor.load() {
                 // Fast path: return the page directly.
@@ -241,7 +241,7 @@ impl Vmo_ {

     /// Commits the page corresponding to the target offset in the VMO and return that page.
     /// If the current offset has already been committed, the page will be returned directly.
-    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<UFrame> {
         let page_idx = offset / PAGE_SIZE;
         self.pages.with(|pages, size| {
             if offset >= size {
@@ -279,7 +279,7 @@ impl Vmo_ {
         commit_flags: CommitFlags,
     ) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         self.pages.with(|pages, size| {
             if range.end > size {
@@ -315,7 +315,7 @@ impl Vmo_ {
         let read_range = offset..(offset + read_len);
         let mut read_offset = offset % PAGE_SIZE;

-        let read = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
+        let read = move |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
             let frame = commit_fn()?;
             frame.reader().skip(read_offset).read_fallible(writer)?;
             read_offset = 0;
@@ -331,7 +331,7 @@ impl Vmo_ {
         let write_range = offset..(offset + write_len);
         let mut write_offset = offset % PAGE_SIZE;

-        let mut write = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
+        let mut write = move |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
             let frame = commit_fn()?;
             frame.writer().skip(write_offset).write_fallible(reader)?;
             write_offset = 0;
@@ -401,7 +401,7 @@ impl Vmo_ {
         Ok(())
     }

-    fn decommit_pages(&self, pages: &mut XArray<DynUFrame>, range: Range<usize>) -> Result<()> {
+    fn decommit_pages(&self, pages: &mut XArray<UFrame>, range: Range<usize>) -> Result<()> {
         let page_idx_range = get_page_idx_range(&range);
         let mut cursor = pages.cursor_mut(page_idx_range.start as u64);
         for page_idx in page_idx_range {
@@ -426,7 +426,7 @@ impl Vmo_ {
         self.flags
     }

-    fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
+    fn replace(&self, page: UFrame, page_idx: usize) -> Result<()> {
         self.pages.with(|pages, size| {
             if page_idx >= size / PAGE_SIZE {
                 return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo");
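[Illustration, not part of the patch: the shape of the `UFrame`-committing closures that `operate_on_range` drives, mirroring the `read`/`write` closures above. `vmo` is assumed to be a `Vmo_` within this module.]

    // Zero out two pages, committing them on demand.
    let zero = |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
        let frame = commit_fn()?;
        frame.writer().fill(0);
        Ok(())
    };
    vmo.operate_on_range(&(0..2 * PAGE_SIZE), zero, CommitFlags::empty())?;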
diff --git a/kernel/src/vm/vmo/options.rs b/kernel/src/vm/vmo/options.rs
index 77a418669..8795d6fc3 100644
--- a/kernel/src/vm/vmo/options.rs
+++ b/kernel/src/vm/vmo/options.rs
@@ -8,7 +8,7 @@ use align_ext::AlignExt;
 use aster_rights::{Rights, TRightSet, TRights};
 use ostd::{
     collections::xarray::XArray,
-    mm::{DynUFrame, DynUSegment, FrameAllocOptions},
+    mm::{FrameAllocOptions, UFrame, USegment},
 };

 use super::{Pager, Pages, Vmo, VmoFlags};
@@ -137,11 +137,11 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
     })
 }

-fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<DynUFrame>> {
+fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<UFrame>> {
     if flags.contains(VmoFlags::CONTIGUOUS) {
         // if the vmo is continuous, we need to allocate frames for the vmo
         let frames_num = size / PAGE_SIZE;
-        let segment: DynUSegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into();
+        let segment: USegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into();
         let mut committed_pages = XArray::new();
         let mut cursor = committed_pages.cursor_mut(0);
         for frame in segment {
diff --git a/kernel/src/vm/vmo/pager.rs b/kernel/src/vm/vmo/pager.rs
index 38cac55b1..8fc11d2fb 100644
--- a/kernel/src/vm/vmo/pager.rs
+++ b/kernel/src/vm/vmo/pager.rs
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use ostd::mm::DynUFrame;
+use ostd::mm::UFrame;

 use crate::prelude::*;

@@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
     /// whatever frame that may or may not be the same as the last time.
     ///
     /// It is up to the pager to decide the range of valid indices.
-    fn commit_page(&self, idx: usize) -> Result<DynUFrame>;
+    fn commit_page(&self, idx: usize) -> Result<UFrame>;

     /// Notify the pager that the frame at a specified index has been updated.
     ///
@@ -54,5 +54,5 @@ pub trait Pager: Send + Sync {
     /// Ask the pager to provide a frame at a specified index.
     /// Notify the pager that the frame will be fully overwritten soon, so pager can
     /// choose not to initialize it.
-    fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame>;
+    fn commit_overwrite(&self, idx: usize) -> Result<UFrame>;
 }
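[Illustration, not part of the patch: a minimal `Pager` under the new return types. Only `commit_page` and `commit_overwrite` appear in these hunks; the two notification methods are assumed from the surrounding trait, and `ZeroPager` is hypothetical.]

    struct ZeroPager;

    impl Pager for ZeroPager {
        fn commit_page(&self, _idx: usize) -> Result<UFrame> {
            // Hand out a fresh zeroed frame for any index.
            Ok(FrameAllocOptions::new().alloc_frame()?.into())
        }
        fn update_page(&self, _idx: usize) -> Result<()> {
            Ok(()) // nothing to write back
        }
        fn decommit_page(&self, _idx: usize) -> Result<()> {
            Ok(()) // nothing to evict
        }
        fn commit_overwrite(&self, idx: usize) -> Result<UFrame> {
            // The frame will be fully overwritten, so skip initialization.
            self.commit_page(idx)
        }
    }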
diff --git a/kernel/src/vm/vmo/static_cap.rs b/kernel/src/vm/vmo/static_cap.rs
index fde449dd4..5e5089faa 100644
--- a/kernel/src/vm/vmo/static_cap.rs
+++ b/kernel/src/vm/vmo/static_cap.rs
@@ -4,14 +4,14 @@ use core::ops::Range;

 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;
-use ostd::mm::{DynUFrame, VmIo};
+use ostd::mm::{UFrame, VmIo};

 use super::{CommitFlags, Vmo, VmoRightsOp};
 use crate::prelude::*;

 impl<R: TRights> Vmo<TRightSet<R>> {
     /// Commits a page at specific offset.
-    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<UFrame> {
         self.check_rights(Rights::WRITE)?;
         self.0.commit_page(offset)
     }
@@ -41,7 +41,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
     #[require(R > Write)]
     pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         self.0
             .operate_on_range(range, operate, CommitFlags::empty())
@@ -114,7 +114,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
     ///
     /// The method requires the Write right.
     #[require(R > Write)]
-    pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
+    pub fn replace(&self, page: UFrame, page_idx: usize) -> Result<()> {
         self.0.replace(page, page_idx)
     }
diff --git a/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs b/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs
index 21c69f9e9..542873b79 100644
--- a/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs
+++ b/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs
@@ -40,7 +40,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
             .alloc_segment(nbytes / PAGE_SIZE)
             .unwrap();
         // Physical memory pages can be only accessed
-        // via the `DynUFrame` or `DynUSegment` abstraction.
+        // via the `UFrame` or `USegment` abstraction.
         segment.write_bytes(0, program).unwrap();
         segment
     };
diff --git a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs
index 00e61d6ef..e26374f5c 100644
--- a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs
+++ b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs
@@ -35,7 +35,7 @@ impl IntRemappingTable {
         Some(self.handles.get(id).unwrap().clone())
     }

-    /// Creates an Interrupt Remapping Table with one DynUFrame (default).
+    /// Creates an Interrupt Remapping Table with one `Segment` (default).
     pub(super) fn new() -> Self {
         const DEFAULT_PAGES: usize = 1;
         let segment = FrameAllocOptions::new()
diff --git a/ostd/src/mm/dma/dma_coherent.rs b/ostd/src/mm/dma/dma_coherent.rs
index b9233ddd0..17c23130c 100644
--- a/ostd/src/mm/dma/dma_coherent.rs
+++ b/ostd/src/mm/dma/dma_coherent.rs
@@ -13,7 +13,7 @@ use crate::{
         io::VmIoOnce,
         kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
         page_prop::CachePolicy,
-        DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter,
+        HasPaddr, Infallible, Paddr, PodOnce, USegment, UntypedMem, VmIo, VmReader, VmWriter,
         PAGE_SIZE,
     },
     prelude::*,
@@ -39,7 +39,7 @@ pub struct DmaCoherent {

 #[derive(Debug)]
 struct DmaCoherentInner {
-    segment: DynUSegment,
+    segment: USegment,
     start_daddr: Daddr,
     is_cache_coherent: bool,
 }
@@ -54,10 +54,7 @@ impl DmaCoherent {
     ///
     /// The method fails if any part of the given `segment`
     /// already belongs to a DMA mapping.
-    pub fn map(
-        segment: DynUSegment,
-        is_cache_coherent: bool,
-    ) -> core::result::Result<Self, DmaError> {
+    pub fn map(segment: USegment, is_cache_coherent: bool) -> core::result::Result<Self, DmaError> {
         let frame_count = segment.size() / PAGE_SIZE;
         let start_paddr = segment.start_paddr();
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
@@ -124,7 +121,7 @@ impl HasDaddr for DmaCoherent {
     }
 }

 impl Deref for DmaCoherent {
-    type Target = DynUSegment;
+    type Target = USegment;
     fn deref(&self) -> &Self::Target {
         &self.inner.segment
     }
diff --git a/ostd/src/mm/dma/dma_stream.rs b/ostd/src/mm/dma/dma_stream.rs
index ace44c5ee..d54861f81 100644
--- a/ostd/src/mm/dma/dma_stream.rs
+++ b/ostd/src/mm/dma/dma_stream.rs
@@ -11,7 +11,7 @@ use crate::{
     error::Error,
     mm::{
         dma::{dma_type, Daddr, DmaType},
-        DynUSegment, HasPaddr, Infallible, Paddr, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
+        HasPaddr, Infallible, Paddr, USegment, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
 };

@@ -34,7 +34,7 @@ pub struct DmaStream {

 #[derive(Debug)]
 struct DmaStreamInner {
-    segment: DynUSegment,
+    segment: USegment,
     start_daddr: Daddr,
     /// TODO: remove this field when on x86.
     #[allow(unused)]
@@ -55,11 +55,11 @@ pub enum DmaDirection {
 }

 impl DmaStream {
-    /// Establishes DMA stream mapping for a given [`DynUSegment`].
+    /// Establishes DMA stream mapping for a given [`USegment`].
     ///
     /// The method fails if the segment already belongs to a DMA mapping.
     pub fn map(
-        segment: DynUSegment,
+        segment: USegment,
         direction: DmaDirection,
         is_cache_coherent: bool,
     ) -> Result<Self, DmaError> {
@@ -107,13 +107,13 @@ impl DmaStream {
         })
     }

-    /// Gets the underlying [`DynUSegment`].
+    /// Gets the underlying [`USegment`].
     ///
     /// Usually, the CPU side should not access the memory
     /// after the DMA mapping is established because
     /// there is a chance that the device is updating
     /// the memory. Do this at your own risk.
-    pub fn segment(&self) -> &DynUSegment {
+    pub fn segment(&self) -> &USegment {
         &self.inner.segment
     }
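[Illustration, not part of the patch: the renamed `USegment` flowing through `DmaStream`. The `sync` call is part of the existing `DmaStream` API, not shown in these hunks.]

    // Map one untyped frame for CPU-to-device streaming.
    let segment: USegment = FrameAllocOptions::new().alloc_segment(1).unwrap().into();
    let stream = DmaStream::map(segment, DmaDirection::ToDevice, false).unwrap();
    // Fill the buffer through `VmIo`, then flush caches before the device reads.
    stream.write_bytes(0, &[0u8; 16]).unwrap();
    stream.sync(0..16).unwrap();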
diff --git a/ostd/src/mm/frame/allocator.rs b/ostd/src/mm/frame/allocator.rs
index 4ea06ee12..a7e94cb75 100644
--- a/ostd/src/mm/frame/allocator.rs
+++ b/ostd/src/mm/frame/allocator.rs
@@ -1,16 +1,13 @@
 // SPDX-License-Identifier: MPL-2.0

-//! The physical page memory allocator.
-//!
-//! TODO: Decouple it with the frame allocator in [`crate::mm::frame::options`] by
-//! allocating pages rather untyped memory from this module.
+//! The physical memory allocator.

 use align_ext::AlignExt;
 use buddy_system_allocator::FrameAllocator;
 use log::info;
 use spin::Once;

-use super::{meta::FrameMeta, segment::Segment, Frame};
+use super::{meta::AnyFrameMeta, segment::Segment, Frame};
 use crate::{
     boot::memory_region::MemoryRegionType,
     error::Error,
@@ -54,8 +51,8 @@ impl FrameAllocOptions {
     }

     /// Allocates a single frame with additional metadata.
-    pub fn alloc_frame_with<M: FrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
-        let frame = PAGE_ALLOCATOR
+    pub fn alloc_frame_with<M: AnyFrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
+        let frame = FRAME_ALLOCATOR
             .get()
             .unwrap()
             .disable_irq()
@@ -85,7 +82,7 @@ impl FrameAllocOptions {
     ///
     /// The returned [`Segment`] contains at least one frame. The method returns
     /// an error if the number of frames is zero.
-    pub fn alloc_segment_with<M: FrameMeta, F>(
+    pub fn alloc_segment_with<M: AnyFrameMeta, F>(
         &self,
         nframes: usize,
         metadata_fn: F,
@@ -96,7 +93,7 @@ impl FrameAllocOptions {
         if nframes == 0 {
             return Err(Error::InvalidArgs);
         }
-        let segment = PAGE_ALLOCATOR
+        let segment = FRAME_ALLOCATOR
             .get()
             .unwrap()
             .disable_irq()
@@ -168,6 +165,8 @@ impl CountingFrameAllocator {
         }
     }

+    // TODO: this method should be marked unsafe as invalid arguments will mess
+    // up the underlying allocator.
     pub fn dealloc(&mut self, start_frame: usize, count: usize) {
         self.allocator.dealloc(start_frame, count);
         self.allocated -= count * PAGE_SIZE;
@@ -182,7 +181,7 @@ impl CountingFrameAllocator {
     }
 }

-pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();
+pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();

 pub(crate) fn init() {
     let regions = crate::boot::memory_regions();
@@ -208,5 +207,5 @@ pub(crate) fn init() {
         }
     }
     let counting_allocator = CountingFrameAllocator::new(allocator, total);
-    PAGE_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator));
+    FRAME_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator));
 }
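[Illustration, not part of the patch: the allocation entry points touched by this hunk. `MyMeta` is a hypothetical metadata type declared with the `impl_frame_meta_for!` macro from `meta.rs` below.]

    // A frame and a segment with the trivial `()` metadata:
    let frame: Frame<()> = FrameAllocOptions::new().alloc_frame()?;
    let segment: Segment<()> = FrameAllocOptions::new().alloc_segment(16)?;
    // A segment with custom metadata, one value per frame, keyed by address:
    let tagged = FrameAllocOptions::new()
        .zeroed(false)
        .alloc_segment_with(16, |paddr| MyMeta { origin: paddr })?;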
diff --git a/ostd/src/mm/frame/meta.rs b/ostd/src/mm/frame/meta.rs
index 4200fa721..636b72508 100644
--- a/ostd/src/mm/frame/meta.rs
+++ b/ostd/src/mm/frame/meta.rs
@@ -1,15 +1,18 @@
 // SPDX-License-Identifier: MPL-2.0

-//! Metadata management of pages.
+//! Metadata management of frames.
 //!
-//! You can picture a globally shared, static, gigantic array of metadata initialized for each page.
-//! An entry in the array is called a `MetaSlot`, which contains the metadata of a page. There would
-//! be a dedicated small "heap" space in each slot for dynamic metadata. You can store anything as the
-//! metadata of a page as long as it's [`Sync`].
+//! You can picture a globally shared, static, gigantic array of metadata
+//! initialized for each frame. An entry in the array is called a [`MetaSlot`],
+//! which contains the metadata of a frame. There would be a dedicated small
+//! "heap" space in each slot for dynamic metadata. You can store anything as
+//! the metadata of a frame as long as it's [`Sync`].
 //!
-//! In the implementation level, the slots are placed in the metadata pages mapped to a certain virtual
-//! address. It is faster, simpler, safer and more versatile compared with an actual static array
-//! implementation.
+//! # Implementation
+//!
+//! The slots are placed in the metadata pages mapped to a certain virtual
+//! address in the kernel space. So finding the metadata of a frame often
+//! comes at no cost since the translation is a simple arithmetic operation.

 pub(crate) mod mapping {
     //! The metadata of each physical page is linear mapped to fixed virtual addresses
     //! [`FRAME_METADATA_RANGE`].

     use super::MetaSlot;
     use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};

-    /// Converts a physical address of a base page to the virtual address of the metadata slot.
-    pub(crate) const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
+    /// Converts a physical address of a base frame to the virtual address of the metadata slot.
+    pub(crate) const fn frame_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
         let base = FRAME_METADATA_RANGE.start;
         let offset = paddr / PAGE_SIZE;
         base + offset * size_of::<MetaSlot>()
     }

-    /// Converts a virtual address of the metadata slot to the physical address of the page.
-    pub(crate) const fn meta_to_page<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr {
+    /// Converts a virtual address of the metadata slot to the physical address of the frame.
+    pub(crate) const fn meta_to_frame<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr {
         let base = FRAME_METADATA_RANGE.start;
         let offset = (vaddr - base) / size_of::<MetaSlot>();
         offset * PAGE_SIZE
     }
 }
@@ -58,28 +61,28 @@ use crate::{
     panic::abort,
 };

-/// The maximum number of bytes of the metadata of a page.
-pub const PAGE_METADATA_MAX_SIZE: usize =
+/// The maximum number of bytes of the metadata of a frame.
+pub const FRAME_METADATA_MAX_SIZE: usize =
     META_SLOT_SIZE - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();

-/// The maximum alignment in bytes of the metadata of a page.
-pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>();
+/// The maximum alignment in bytes of the metadata of a frame.
+pub const FRAME_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>();

 const META_SLOT_SIZE: usize = 64;

 #[repr(C)]
 pub(in crate::mm) struct MetaSlot {
-    /// The metadata of the page.
+    /// The metadata of a frame.
     ///
     /// It is placed at the beginning of a slot because:
     ///  - the implementation can simply cast a `*const MetaSlot`
-    ///    to a `*const FrameMeta` for manipulation;
+    ///    to a `*const AnyFrameMeta` for manipulation;
     ///  - if the metadata need special alignment, we can provide
     ///    at most `PAGE_METADATA_ALIGN` bytes of alignment;
     ///  - the subsequent fields can utilize the padding of the
     ///    reference count to save space.
     ///
-    /// Don't access this field by a reference to the slot.
-    _storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>,
+    /// Don't access this field with a reference to the slot.
+    _storage: UnsafeCell<[u8; FRAME_METADATA_MAX_SIZE]>,
     /// The reference count of the page.
     ///
     /// Specifically, the reference count has the following meaning:
@@ -94,7 +97,7 @@ pub(in crate::mm) struct MetaSlot {
     /// [`Frame::from_unused`]: super::Frame::from_unused
     //
     // Other than this field the fields should be `MaybeUninit`.
-    // See initialization in `alloc_meta_pages`.
+    // See initialization in `alloc_meta_frames`.
     pub(super) ref_count: AtomicU32,
     /// The virtual table that indicates the type of the metadata.
     pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
 }

 pub(super) const REF_COUNT_UNUSED: u32 = u32::MAX;
 const REF_COUNT_MAX: u32 = i32::MAX as u32;

-type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn FrameMeta>;
+type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;

 const_assert_eq!(PAGE_SIZE % META_SLOT_SIZE, 0);
 const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE);

-/// All page metadata types must implement this trait.
+/// All frame metadata types must implement this trait.
 ///
-/// If a page type needs specific drop behavior, it should specify
+/// If a frame type needs specific drop behavior, it should specify
 /// when implementing this trait. When we drop the last handle to
-/// this page, the `on_drop` method will be called. The `on_drop`
-/// method is called with the physical address of the page.
+/// this frame, the `on_drop` method will be called. The `on_drop`
+/// method is called with the physical address of the frame.
 ///
 /// # Safety
 ///
 /// The implemented structure must have a size less than or equal to
-/// [`PAGE_METADATA_MAX_SIZE`] and an alignment less than or equal to
-/// [`PAGE_METADATA_MAX_ALIGN`].
+/// [`FRAME_METADATA_MAX_SIZE`] and an alignment less than or equal to
+/// [`FRAME_METADATA_MAX_ALIGN`].
 ///
 /// The implementer of the `on_drop` method should ensure that the frame is
 /// safe to be read.
-pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static {
-    /// Called when the last handle to the page is dropped.
+pub unsafe trait AnyFrameMeta: Any + Send + Sync + Debug + 'static {
+    /// Called when the last handle to the frame is dropped.
     fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
         let _ = reader;
     }

     /// Whether the metadata's associated frame is untyped.
     ///
-    /// If a type implements [`UFrameMeta`], this should be `true`.
+    /// If a type implements [`AnyUFrameMeta`], this should be `true`.
     /// Otherwise, it should be `false`.
     ///
-    /// [`UFrameMeta`]: super::untyped::UFrameMeta
+    /// [`AnyUFrameMeta`]: super::untyped::AnyUFrameMeta
     fn is_untyped(&self) -> bool {
         false
     }
 }

-/// Makes a structure usable as a page metadata.
+/// Makes a structure usable as a frame metadata.
 ///
-/// Directly implementing [`FrameMeta`] is not safe since the size and alignment
+/// Directly implementing [`AnyFrameMeta`] is not safe since the size and alignment
 /// must be checked. This macro provides a safe way to implement the trait with
 /// compile-time checks.
 #[macro_export]
@@ -150,21 +153,21 @@ macro_rules! impl_frame_meta_for {
     // Implement without specifying the drop behavior.
     ($t:ty) => {
         use static_assertions::const_assert;
-        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
-        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN);
         // SAFETY: The size and alignment of the structure are checked.
-        unsafe impl $crate::mm::frame::meta::FrameMeta for $t {}
+        unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {}
     };
 }

 pub use impl_frame_meta_for;

 impl MetaSlot {
-    /// Increases the page reference count by one.
+    /// Increases the frame reference count by one.
     ///
     /// # Safety
     ///
-    /// The caller must have already held a reference to the page.
+    /// The caller must have already held a reference to the frame.
     pub(super) unsafe fn inc_ref_count(&self) {
         let last_ref_cnt = self.ref_count.fetch_add(1, Ordering::Relaxed);
         debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
@@ -182,8 +185,8 @@ impl MetaSlot {

 /// # Safety
 ///
-/// The caller should ensure that the pointer points to a page's metadata slot. The
-/// page should have a last handle to the page, and the page is about to be dropped,
+/// The caller should ensure that the pointer points to a frame's metadata slot. The
+/// frame should have a last handle to the frame, and the frame is about to be dropped,
 /// as the metadata slot after this operation becomes uninitialized.
 pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
@@ -193,14 +196,14 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // This should be guaranteed as a safety requirement.
     debug_assert_eq!(slot.ref_count.load(Ordering::Relaxed), 0);

-    let paddr = mapping::meta_to_page::<PagingConsts>(ptr as Vaddr);
+    let paddr = mapping::meta_to_frame::<PagingConsts>(ptr as Vaddr);

-    // SAFETY: We have exclusive access to the page metadata.
+    // SAFETY: We have exclusive access to the frame metadata.
     let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
-    // SAFETY: The page metadata is initialized and valid.
+    // SAFETY: The frame metadata is initialized and valid.
     let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };

-    let meta_ptr: *mut dyn FrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);
+    let meta_ptr: *mut dyn AnyFrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);

     // SAFETY: The implementer of the frame metadata decides that if the frame
     // is safe to be read or not.
@@ -209,11 +212,11 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {

     // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
     // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
-    // access to the page metadata.
+    // access to the frame metadata.
     unsafe {
         // Invoke the custom `on_drop` handler.
         (*meta_ptr).on_drop(&mut reader);
-        // Drop the page metadata.
+        // Drop the frame metadata.
         core::ptr::drop_in_place(meta_ptr);
     }

@@ -221,24 +224,24 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // be reordered after this memory store.
     slot.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);

-    // Deallocate the page.
-    // It would return the page to the allocator for further use. This would be done
+    // Deallocate the frame.
+    // It would return the frame to the allocator for further use. This would be done
     // after the release of the metadata to avoid re-allocation before the metadata
     // is reset.
-    allocator::PAGE_ALLOCATOR
+    allocator::FRAME_ALLOCATOR
         .get()
         .unwrap()
         .lock()
         .dealloc(paddr / PAGE_SIZE, 1);
 }

-/// The metadata of pages that holds metadata of pages.
+/// The metadata of frames that hold the metadata of other frames.
 #[derive(Debug, Default)]
 pub struct MetaPageMeta {}

 impl_frame_meta_for!(MetaPageMeta);

-/// Initializes the metadata of all physical pages.
+/// Initializes the metadata of all physical frames.
 ///
 /// The function returns a list of `Frame`s containing the metadata.
 pub(crate) fn init() -> Segment<MetaPageMeta> {
     };

     info!(
-        "Initializing page metadata for physical memory up to {:x}",
+        "Initializing frame metadata for physical memory up to {:x}",
         max_paddr
     );

     super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);

-    let num_pages = max_paddr / page_size::<PagingConsts>(1);
-    let (num_meta_pages, meta_pages) = alloc_meta_pages(num_pages);
+    let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
+    let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);

-    // Map the metadata pages.
+    // Map the metadata frames.
     boot_pt::with_borrow(|boot_pt| {
-        for i in 0..num_meta_pages {
+        for i in 0..nr_meta_pages {
             let frame_paddr = meta_pages + i * PAGE_SIZE;
-            let vaddr = mapping::page_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
+            let vaddr = mapping::frame_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
             let prop = PageProperty {
                 flags: PageFlags::RW,
                 cache: CachePolicy::Writeback,
@@ -275,41 +278,45 @@ pub(crate) fn init() -> Segment<MetaPageMeta> {
     })
     .unwrap();

-    // Now the metadata pages are mapped, we can initialize the metadata.
-    Segment::from_unused(meta_pages..meta_pages + num_meta_pages * PAGE_SIZE, |_| {
+    // Now the metadata frames are mapped, we can initialize the metadata.
+    Segment::from_unused(meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE, |_| {
         MetaPageMeta {}
     })
 }

-fn alloc_meta_pages(num_pages: usize) -> (usize, Paddr) {
-    let num_meta_pages = num_pages
+fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
+    let nr_meta_pages = tot_nr_frames
         .checked_mul(size_of::<MetaSlot>())
         .unwrap()
         .div_ceil(PAGE_SIZE);
-    let start_paddr = allocator::PAGE_ALLOCATOR
+    let start_paddr = allocator::FRAME_ALLOCATOR
         .get()
         .unwrap()
         .lock()
-        .alloc(num_meta_pages)
+        .alloc(nr_meta_pages)
         .unwrap()
         * PAGE_SIZE;

     let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;

-    // Fill the metadata pages with a byte pattern of `REF_COUNT_UNUSED`.
+    // Fill the metadata frames with a byte pattern of `REF_COUNT_UNUSED`.
     debug_assert_eq!(REF_COUNT_UNUSED.to_ne_bytes(), [0xff, 0xff, 0xff, 0xff]);
-    // SAFETY: `slots` and the length is a valid region for the metadata pages
+    // SAFETY: `slots` and the length is a valid region for the metadata frames
     // that are going to be treated as metadata slots. The byte pattern is
     // valid as the initial value of the reference count (other fields are
     // either not accessed or `MaybeUninit`).
     unsafe {
-        core::ptr::write_bytes(slots as *mut u8, 0xff, num_pages * size_of::<MetaSlot>());
+        core::ptr::write_bytes(
+            slots as *mut u8,
+            0xff,
+            tot_nr_frames * size_of::<MetaSlot>(),
+        );
     }

-    (num_meta_pages, start_paddr)
+    (nr_meta_pages, start_paddr)
 }

-/// Adds a temporary linear mapping for the metadata pages.
+/// Adds a temporary linear mapping for the metadata frames.
 ///
 /// We only assume boot page table to contain 4G linear mapping. Thus if the
 /// physical memory is huge we end up depleted of linear virtual memory for
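[Illustration, not part of the patch: defining and using a typed frame metadata with the checked `impl_frame_meta_for!` macro above. `CounterMeta` is hypothetical.]

    use core::sync::atomic::{AtomicU32, Ordering};

    // Size (4 bytes) and alignment fit the compile-time bounds checked by
    // the macro; the frame stays typed since `is_untyped()` defaults to false.
    #[derive(Debug, Default)]
    struct CounterMeta {
        hits: AtomicU32,
    }
    impl_frame_meta_for!(CounterMeta);

    let frame = FrameAllocOptions::new().alloc_frame_with(CounterMeta::default())?;
    frame.meta().hits.fetch_add(1, Ordering::Relaxed);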
diff --git a/ostd/src/mm/frame/mod.rs b/ostd/src/mm/frame/mod.rs
index 8fb688dd8..0486e1a06 100644
--- a/ostd/src/mm/frame/mod.rs
+++ b/ostd/src/mm/frame/mod.rs
@@ -1,18 +1,35 @@
 // SPDX-License-Identifier: MPL-2.0

-//! Physical memory page management.
+//! Frame (physical memory page) management.
 //!
-//! A page is an aligned, contiguous range of bytes in physical memory. The sizes
-//! of base pages and huge pages are architecture-dependent. A page can be mapped
-//! to a virtual address using the page table.
+//! A frame is an aligned, contiguous range of bytes in physical memory. The
+//! sizes of base frames and huge frames (that are mapped as "huge pages") are
+//! architecture-dependent. A frame can be mapped to virtual address spaces
+//! using the page table.
 //!
-//! Pages can be accessed through page handles, namely, [`Frame`]. A page handle
-//! is a reference-counted handle to a page. When all handles to a page are dropped,
-//! the page is released and can be reused.
+//! Frames can be accessed through frame handles, namely, [`Frame`]. A frame
+//! handle is a reference-counted pointer to a frame. When all handles to a
+//! frame are dropped, the frame is released and can be reused. Contiguous
+//! frames are managed with [`Segment`].
 //!
-//! Pages can have dedicated metadata, which is implemented in the [`meta`] module.
-//! The reference count and usage of a page are stored in the metadata as well, leaving
-//! the handle only a pointer to the metadata.
+//! There are various kinds of frames. The top-level grouping of frame kinds
+//! are "typed" frames and "untyped" frames. Typed frames host Rust objects
+//! that must follow the visibility, lifetime and borrow rules of Rust, thus
+//! not being able to be directly manipulated. Untyped frames are raw memory
+//! that can be manipulated directly. So only untyped frames can be
+//!  - safely shared to external entities such as device drivers or user-space
+//!    applications.
+//!  - or directly manipulated with readers and writers that neglect Rust's
+//!    "alias XOR mutability" rule.
+//!
+//! The kind of a frame is determined by the type of its metadata. Untyped
+//! frames have a metadata type that implements the [`AnyUFrameMeta`] trait,
+//! while typed frames don't.
+//!
+//! Frames can have dedicated metadata, which is implemented in the [`meta`]
+//! module. The reference count and usage of a frame are stored in the metadata
+//! as well, leaving the handle only a pointer to the metadata slot. Users
+//! can create custom metadata types by implementing the [`AnyFrameMeta`] trait.

 pub mod allocator;
 pub mod meta;
@@ -25,37 +42,42 @@ use core::{
 };

 use meta::{
-    mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED,
+    mapping, AnyFrameMeta, MetaSlot, FRAME_METADATA_MAX_ALIGN, FRAME_METADATA_MAX_SIZE,
+    REF_COUNT_UNUSED,
 };
 pub use segment::Segment;
-use untyped::{DynUFrame, UFrameMeta};
+use untyped::{AnyUFrameMeta, UFrame};

 use super::{PagingLevel, PAGE_SIZE};
 use crate::mm::{Paddr, PagingConsts, Vaddr};

 static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);

-/// A physical memory frame with a statically-known usage, whose metadata is represented by `M`.
+/// A smart pointer to a frame.
+///
+/// A frame is a contiguous range of bytes in physical memory. The [`Frame`]
+/// type is a smart pointer to a frame that is reference-counted.
+///
+/// Frames are associated with metadata. The type of the metadata `M`
+/// determines the kind of the frame. If `M` implements [`AnyUFrameMeta`], the
+/// frame is an untyped frame. Otherwise, it is a typed frame.
 #[derive(Debug)]
 #[repr(transparent)]
-pub struct Frame<M: FrameMeta + ?Sized> {
-    pub(super) ptr: *const MetaSlot,
-    pub(super) _marker: PhantomData<M>,
+pub struct Frame<M: AnyFrameMeta + ?Sized> {
+    // TODO: We may use a `NonNull<MetaSlot>` here to make the frame a maybe-fat
+    // pointer and implement `CoerceUnsized` to avoid `From`s. However this is
+    // not quite feasible currently because we cannot cast a must-be-fat
+    // pointer (`*const dyn AnyFrameMeta`) to a maybe-fat pointer (`NonNull<M>`).
+    ptr: *const MetaSlot,
+    _marker: PhantomData<M>,
 }

-/// A physical memory frame with a dynamically-known usage.
-///
-/// The usage of this frame will not be changed while this object is alive. But the
-/// usage is not known at compile time. An [`DynFrame`] as a parameter accepts any
-/// type of frames.
-pub type DynFrame = Frame<dyn FrameMeta>;
+unsafe impl<M: AnyFrameMeta + ?Sized> Send for Frame<M> {}

-unsafe impl<M: FrameMeta + ?Sized> Send for Frame<M> {}
+unsafe impl<M: AnyFrameMeta + ?Sized> Sync for Frame<M> {}

-unsafe impl<M: FrameMeta + ?Sized> Sync for Frame<M> {}
-
-impl<M: FrameMeta> Frame<M> {
-    /// Get a `Frame` handle with a specific usage from a raw, unused page.
+impl<M: AnyFrameMeta> Frame<M> {
+    /// Gets a [`Frame`] with a specific usage from a raw, unused page.
     ///
     /// The caller should provide the initial metadata of the page.
     ///
@@ -68,11 +90,11 @@ impl<M: FrameMeta> Frame<M> {
         assert!(paddr % PAGE_SIZE == 0);
         assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);

-        // Checking unsafe preconditions of the `FrameMeta` trait.
-        debug_assert!(size_of::<M>() <= PAGE_METADATA_MAX_SIZE);
-        debug_assert!(align_of::<M>() <= PAGE_METADATA_MAX_ALIGN);
+        // Checking unsafe preconditions of the `AnyFrameMeta` trait.
+        debug_assert!(size_of::<M>() <= FRAME_METADATA_MAX_SIZE);
+        debug_assert!(align_of::<M>() <= FRAME_METADATA_MAX_ALIGN);

-        let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
+        let vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;

         // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
@@ -88,13 +110,13 @@ impl<M: FrameMeta> Frame<M> {
         // SAFETY: We have exclusive access to the page metadata. These fields are mutably
         // borrowed only once.
         let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
-        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta));
+        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn AnyFrameMeta));

         // SAFETY:
         // 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the
         //    metadata storage.
         // 2. The size and the alignment of the metadata storage is large enough to hold `M`
-        //    (guaranteed by the safety requirement of the `FrameMeta` trait).
+        //    (guaranteed by the safety requirement of the `AnyFrameMeta` trait).
         // 3. We have exclusive access to the metadata storage (guaranteed by the reference count).
         unsafe { ptr.cast::<M>().cast_mut().write(metadata) };

@@ ...
         }
     }

-    /// Get the metadata of this page.
+    /// Gets the metadata of this frame.
     pub fn meta(&self) -> &M {
         // SAFETY: `self.ptr` points to the metadata storage which is valid to
         // be immutably borrowed as `M` because the type is correct, it lives
@@ ...
     }
 }

-impl<M: FrameMeta + ?Sized> Frame<M> {
-    /// Get the physical address.
+impl<M: AnyFrameMeta + ?Sized> Frame<M> {
+    /// Gets the physical address of the start of the frame.
     pub fn start_paddr(&self) -> Paddr {
-        mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
+        mapping::meta_to_frame::<PagingConsts>(self.ptr as Vaddr)
     }

-    /// Get the paging level of this page.
+    /// Gets the paging level of this frame.
     ///
     /// This is the level of the page table entry that maps the frame,
     /// which determines the size of the frame.
@@ ...
         1
     }

-    /// Size of this page in bytes.
+    /// Gets the size of this frame in bytes.
     pub const fn size(&self) -> usize {
         PAGE_SIZE
     }

-    /// Get the dyncamically-typed metadata of this frame.
+    /// Gets the dynamically-typed metadata of this frame.
     ///
     /// If the type is known at compile time, use [`Frame::meta`] instead.
-    pub fn dyn_meta(&self) -> &dyn FrameMeta {
+    pub fn dyn_meta(&self) -> &dyn AnyFrameMeta {
         let slot = self.slot();

         // SAFETY: The page metadata is valid to be borrowed immutably, since it will never be
@@ ...
         // SAFETY: The page metadata is initialized and valid.
         let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };

-        let meta_ptr: *const dyn FrameMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);
+        let meta_ptr: *const dyn AnyFrameMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);

         // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
         // borrowed under `vtable_ptr` because the vtable is correct, it lives under the given
         unsafe { &*meta_ptr }
     }

-    /// Get the reference count of the page.
+    /// Gets the reference count of the frame.
     ///
-    /// It returns the number of all references to the page, including all the
-    /// existing page handles ([`Frame`], [`Frame`]), and all the mappings in the
-    /// page table that points to the page.
+    /// It returns the number of all references to the frame, including all the
+    /// existing frame handles ([`Frame`], [`Frame`]), and all
+    /// the mappings in the page table that point to the frame.
     ///
     /// # Safety
     ///
@@ -181,13 +203,13 @@ impl Frame {
         unsafe { &(*self.ptr).ref_count }
     }
 
-    /// Forget the handle to the page.
+    /// Forgets the handle to the frame.
     ///
-    /// This will result in the page being leaked without calling the custom dropper.
+    /// This will result in the frame being leaked without calling the custom dropper.
     ///
-    /// A physical address to the page is returned in case the page needs to be
+    /// The physical address of the frame is returned in case the frame needs to be
     /// restored using [`Frame::from_raw`] later. This is useful when some architectural
-    /// data structures need to hold the page handle such as the page table.
+    /// data structures need to hold the frame handle such as the page table.
     #[allow(unused)]
     pub(in crate::mm) fn into_raw(self) -> Paddr {
         let paddr = self.start_paddr();
@@ -195,7 +217,7 @@ impl Frame {
         paddr
     }
 
-    /// Restore a forgotten `Frame` from a physical address.
+    /// Restores a forgotten `Frame` from a physical address.
     ///
     /// # Safety
    ///
@@ -205,10 +227,10 @@ impl Frame {
     /// And the restoring operation should only be done once for a forgotten
     /// `Frame`. Otherwise double-free will happen.
     ///
-    /// Also, the caller ensures that the usage of the page is correct. There's
+    /// Also, the caller ensures that the usage of the frame is correct. There's
     /// no checking of the usage in this function.
     pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
-        let vaddr = mapping::page_to_meta::(paddr);
+        let vaddr = mapping::frame_to_meta::(paddr);
         let ptr = vaddr as *const MetaSlot;
 
         Self {
@@ -224,9 +246,9 @@ impl Frame {
         }
     }
 }
 
-impl Clone for Frame {
+impl Clone for Frame {
     fn clone(&self) -> Self {
-        // SAFETY: We have already held a reference to the page.
+        // SAFETY: We have already held a reference to the frame.
         unsafe { self.slot().inc_ref_count() };
 
         Self {
@@ -236,7 +258,7 @@ impl Clone for Frame {
         }
     }
 }
 
-impl Drop for Frame {
+impl Drop for Frame {
     fn drop(&mut self) {
         let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
         debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
@@ -254,84 +276,84 @@ impl Drop for Frame {
     }
 }
 
-impl TryFrom> for Frame {
-    type Error = Frame;
+impl TryFrom> for Frame {
+    type Error = Frame;
 
-    /// Try converting a [`Frame`] into the statically-typed [`Frame`].
+    /// Tries converting a [`Frame`] into the statically-typed [`Frame`].
     ///
-    /// If the usage of the page is not the same as the expected usage, it will
-    /// return the dynamic page itself as is.
-    fn try_from(dyn_frame: Frame) -> Result {
+    /// If the usage of the frame is not the same as the expected usage, it will
+    /// return the dynamic frame itself as is.
+    fn try_from(dyn_frame: Frame) -> Result {
         if (dyn_frame.dyn_meta() as &dyn core::any::Any).is::() {
             // SAFETY: The metadata is coerceable and the struct is transmutable.
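The leak-and-restore pair documented here is crate-internal; a sketch of the intended pattern inside `ostd::mm` (the helper names are hypothetical):

```rust
// E.g., when a page-table entry takes over ownership of a frame:
fn stash<M: AnyFrameMeta>(frame: Frame<M>) -> Paddr {
    // The handle is forgotten but the reference count stays, so the frame
    // remains alive while only `paddr` refers to it.
    frame.into_raw()
}

unsafe fn unstash<M: AnyFrameMeta>(paddr: Paddr) -> Frame<M> {
    // SAFETY (per the docs above): `paddr` was produced by `into_raw`, is
    // restored exactly once, and `M` matches the frame's actual metadata.
    unsafe { Frame::<M>::from_raw(paddr) }
}
```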
- Ok(unsafe { core::mem::transmute::, Frame>(dyn_frame) }) + Ok(unsafe { core::mem::transmute::, Frame>(dyn_frame) }) } else { Err(dyn_frame) } } } -impl From> for Frame { +impl From> for Frame { fn from(frame: Frame) -> Self { // SAFETY: The metadata is coerceable and the struct is transmutable. unsafe { core::mem::transmute(frame) } } } -impl From> for DynUFrame { +impl From> for UFrame { fn from(frame: Frame) -> Self { // SAFETY: The metadata is coerceable and the struct is transmutable. unsafe { core::mem::transmute(frame) } } } -impl From<&Frame> for &DynUFrame { +impl From<&Frame> for &UFrame { fn from(frame: &Frame) -> Self { // SAFETY: The metadata is coerceable and the struct is transmutable. unsafe { core::mem::transmute(frame) } } } -impl From for Frame { - fn from(frame: DynUFrame) -> Self { +impl From for Frame { + fn from(frame: UFrame) -> Self { // SAFETY: The metadata is coerceable and the struct is transmutable. unsafe { core::mem::transmute(frame) } } } -impl TryFrom> for DynUFrame { - type Error = Frame; +impl TryFrom> for UFrame { + type Error = Frame; - /// Try converting a [`Frame`] into [`DynUFrame`]. + /// Tries converting a [`Frame`] into [`UFrame`]. /// - /// If the usage of the page is not the same as the expected usage, it will - /// return the dynamic page itself as is. - fn try_from(dyn_frame: Frame) -> Result { + /// If the usage of the frame is not the same as the expected usage, it will + /// return the dynamic frame itself as is. + fn try_from(dyn_frame: Frame) -> Result { if dyn_frame.dyn_meta().is_untyped() { // SAFETY: The metadata is coerceable and the struct is transmutable. - Ok(unsafe { core::mem::transmute::, DynUFrame>(dyn_frame) }) + Ok(unsafe { core::mem::transmute::, UFrame>(dyn_frame) }) } else { Err(dyn_frame) } } } -/// Increases the reference count of the page by one. +/// Increases the reference count of the frame by one. /// /// # Safety /// /// The caller should ensure the following conditions: -/// 1. The physical address must represent a valid page; -/// 2. The caller must have already held a reference to the page. -pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) { +/// 1. The physical address must represent a valid frame; +/// 2. The caller must have already held a reference to the frame. +pub(in crate::mm) unsafe fn inc_frame_ref_count(paddr: Paddr) { debug_assert!(paddr % PAGE_SIZE == 0); debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr); - let vaddr: Vaddr = mapping::page_to_meta::(paddr); + let vaddr: Vaddr = mapping::frame_to_meta::(paddr); // SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking // an immutable reference to it is always safe. let slot = unsafe { &*(vaddr as *const MetaSlot) }; - // SAFETY: We have already held a reference to the page. + // SAFETY: We have already held a reference to the frame. unsafe { slot.inc_ref_count() }; } diff --git a/ostd/src/mm/frame/segment.rs b/ostd/src/mm/frame/segment.rs index 77da52213..6e41036ff 100644 --- a/ostd/src/mm/frame/segment.rs +++ b/ostd/src/mm/frame/segment.rs @@ -1,66 +1,57 @@ // SPDX-License-Identifier: MPL-2.0 -//! A contiguous range of pages. +//! A contiguous range of frames. use core::{mem::ManuallyDrop, ops::Range}; -use super::{inc_page_ref_count, meta::FrameMeta, Frame}; -use crate::mm::{Paddr, UFrameMeta, PAGE_SIZE}; +use super::{inc_frame_ref_count, meta::AnyFrameMeta, Frame}; +use crate::mm::{AnyUFrameMeta, Paddr, PAGE_SIZE}; -/// A contiguous range of homogeneous physical memory pages. 
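Putting these conversions together, code holding a dynamically-typed frame can try progressively weaker views; a sketch (`MyMeta` is a hypothetical `AnyFrameMeta` implementor):

```rust
use ostd::mm::{frame::meta::AnyFrameMeta, Frame, UFrame};

fn classify(any_frame: Frame<dyn AnyFrameMeta>) {
    match Frame::<MyMeta>::try_from(any_frame) {
        // The metadata is exactly `MyMeta`: a statically-typed frame.
        Ok(typed) => drop(typed),
        // Otherwise the frame is returned as is; it may still convert to
        // `UFrame` if its metadata is some untyped type.
        Err(any_frame) => match UFrame::try_from(any_frame) {
            Ok(untyped) => drop(untyped),
            Err(typed_other) => drop(typed_other),
        },
    }
}
```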
+/// A contiguous range of homogeneous physical memory frames.
 ///
-/// This is a handle to many contiguous pages. It will be more lightweight
-/// than owning an array of page handles.
+/// This is a handle to multiple contiguous frames. It will be more lightweight
+/// than owning an array of frame handles.
 ///
-/// The ownership is achieved by the reference counting mechanism of pages.
-/// When constructing a `Segment`, the page handles are created then
-/// forgotten, leaving the reference count. When dropping a it, the page
+/// The ownership is achieved by the reference counting mechanism of frames.
+/// When constructing a [`Segment`], the frame handles are created then
+/// forgotten, leaving the reference count. When dropping it, the frame
 /// handles are restored and dropped, decrementing the reference count.
 ///
-/// All the metadata of the pages are homogeneous, i.e., they are of the same
+/// All the metadata of the frames are homogeneous, i.e., they are of the same
 /// type.
 #[derive(Debug)]
 #[repr(transparent)]
-pub struct Segment {
+pub struct Segment {
     range: Range,
     _marker: core::marker::PhantomData,
 }
 
-/// A contiguous range of homogeneous physical memory frames that have any metadata.
-///
-/// In other words, the metadata of the frames are of the same type but the type
-/// is not known at compile time. An [`DynSegment`] as a parameter accepts any
-/// type of segments.
-///
-/// The usage of this frame will not be changed while this object is alive.
-pub type DynSegment = Segment;
-
-/// A contiguous range of homogeneous untyped physical memory pages that have any metadata.
+/// A contiguous range of homogeneous untyped physical memory frames that have any metadata.
 ///
 /// In other words, the metadata of the frames are of the same type, and they
 /// are untyped, but the type of metadata is not known at compile time. An
-/// [`DynUSegment`] as a parameter accepts any untyped segments.
+/// [`USegment`] as a parameter accepts any untyped segments.
 ///
 /// The usage of this frame will not be changed while this object is alive.
-pub type DynUSegment = Segment;
+pub type USegment = Segment;
 
-impl Drop for Segment {
+impl Drop for Segment {
     fn drop(&mut self) {
         for paddr in self.range.clone().step_by(PAGE_SIZE) {
-            // SAFETY: for each page there would be a forgotten handle
+            // SAFETY: for each frame there would be a forgotten handle
             // when creating the `Segment` object.
             drop(unsafe { Frame::::from_raw(paddr) });
         }
     }
 }
 
-impl Clone for Segment {
+impl Clone for Segment {
     fn clone(&self) -> Self {
         for paddr in self.range.clone().step_by(PAGE_SIZE) {
-            // SAFETY: for each page there would be a forgotten handle
+            // SAFETY: for each frame there would be a forgotten handle
             // when creating the `Segment` object, so we already have
-            // reference counts for the pages.
-            unsafe { inc_page_ref_count(paddr) };
+            // reference counts for the frames.
+            unsafe { inc_frame_ref_count(paddr) };
        }
         Self {
             range: self.range.clone(),
@@ -69,18 +60,18 @@ impl Clone for Segment {
     }
 }
 
-impl Segment {
-    /// Creates a new `Segment` from unused pages.
+impl Segment {
+    /// Creates a new [`Segment`] from unused frames.
     ///
-    /// The caller must provide a closure to initialize metadata for all the pages.
-    /// The closure receives the physical address of the page and returns the
+    /// The caller must provide a closure to initialize metadata for all the frames.
+    /// The closure receives the physical address of the frame and returns the
     /// metadata, which is similar to [`core::array::from_fn`].
     ///
     /// # Panics
     ///
     /// The function panics if:
     ///  - the physical address is invalid or not aligned;
-    ///  - any of the pages are already in use.
+    ///  - any of the frames are already in use.
     pub fn from_unused(range: Range, mut metadata_fn: F) -> Self
     where
         F: FnMut(Paddr) -> M,
@@ -95,26 +86,26 @@ impl Segment {
     }
 }
 
-impl Segment {
-    /// Gets the start physical address of the contiguous pages.
+impl Segment {
+    /// Gets the start physical address of the contiguous frames.
     pub fn start_paddr(&self) -> Paddr {
         self.range.start
     }
 
-    /// Gets the end physical address of the contiguous pages.
+    /// Gets the end physical address of the contiguous frames.
     pub fn end_paddr(&self) -> Paddr {
         self.range.end
     }
 
-    /// Gets the length in bytes of the contiguous pages.
+    /// Gets the length in bytes of the contiguous frames.
     pub fn size(&self) -> usize {
         self.range.end - self.range.start
     }
 
-    /// Splits the pages into two at the given byte offset from the start.
+    /// Splits the frames into two at the given byte offset from the start.
     ///
-    /// The resulting pages cannot be empty. So the offset cannot be neither
-    /// zero nor the length of the pages.
+    /// The resulting frames cannot be empty. So the offset can be neither
+    /// zero nor the length of the frames.
     ///
     /// # Panics
     ///
@@ -139,10 +130,10 @@ impl Segment {
         )
     }
 
-    /// Gets an extra handle to the pages in the byte offset range.
+    /// Gets an extra handle to the frames in the byte offset range.
     ///
     /// The sliced byte offset range in indexed by the offset from the start of
-    /// the contiguous pages. The resulting pages holds extra reference counts.
+    /// the contiguous frames. The resulting frames hold extra reference counts.
     ///
     /// # Panics
     ///
@@ -155,10 +146,10 @@ impl Segment {
         assert!(start <= end && end <= self.range.end);
 
         for paddr in (start..end).step_by(PAGE_SIZE) {
-            // SAFETY: We already have reference counts for the pages since
-            // for each page there would be a forgotten handle when creating
+            // SAFETY: We already have reference counts for the frames since
+            // for each frame there would be a forgotten handle when creating
             // the `Segment` object.
-            unsafe { inc_page_ref_count(paddr) };
+            unsafe { inc_frame_ref_count(paddr) };
         }
 
         Self {
@@ -168,10 +159,10 @@ impl Segment {
     }
 }
 
-impl From> for Segment {
-    fn from(page: Frame) -> Self {
-        let pa = page.start_paddr();
-        let _ = ManuallyDrop::new(page);
+impl From> for Segment {
+    fn from(frame: Frame) -> Self {
+        let pa = frame.start_paddr();
+        let _ = ManuallyDrop::new(frame);
         Self {
             range: pa..pa + PAGE_SIZE,
             _marker: core::marker::PhantomData,
@@ -179,25 +170,25 @@ impl From> for Segment {
     }
 }
 
-impl Iterator for Segment {
+impl Iterator for Segment {
     type Item = Frame;
 
     fn next(&mut self) -> Option {
         if self.range.start < self.range.end {
-            // SAFETY: each page in the range would be a handle forgotten
+            // SAFETY: each frame in the range would be a handle forgotten
             // when creating the `Segment` object.
-            let page = unsafe { Frame::::from_raw(self.range.start) };
+            let frame = unsafe { Frame::::from_raw(self.range.start) };
             self.range.start += PAGE_SIZE;
             // The end cannot be non-page-aligned.
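A sketch of the slicing and splitting semantics documented above (method signatures are assumed from the hunks; offsets are in bytes and must be page-aligned and in range, or the methods panic):

```rust
use ostd::mm::{frame::meta::AnyFrameMeta, Segment, PAGE_SIZE};

fn carve<M: AnyFrameMeta>(seg: Segment<M>) {
    assert!(seg.size() >= 2 * PAGE_SIZE);
    // `slice` yields an extra segment over a sub-range, bumping the
    // reference count of every frame it covers.
    let head = seg.slice(&(0..PAGE_SIZE));
    // `split` consumes the segment; neither resulting half may be empty.
    let (left, right) = seg.split(PAGE_SIZE);
    let _ = (head, left, right);
}
```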
debug_assert!(self.range.start <= self.range.end); - Some(page) + Some(frame) } else { None } } } -impl From> for DynSegment { +impl From> for Segment { fn from(seg: Segment) -> Self { let seg = ManuallyDrop::new(seg); Self { @@ -207,13 +198,13 @@ impl From> for DynSegment { } } -impl TryFrom for Segment { - type Error = DynSegment; +impl TryFrom> for Segment { + type Error = Segment; - fn try_from(seg: DynSegment) -> core::result::Result { + fn try_from(seg: Segment) -> core::result::Result { // SAFETY: for each page there would be a forgotten handle // when creating the `Segment` object. - let first_frame = unsafe { Frame::::from_raw(seg.range.start) }; + let first_frame = unsafe { Frame::::from_raw(seg.range.start) }; let first_frame = ManuallyDrop::new(first_frame); if !(first_frame.dyn_meta() as &dyn core::any::Any).is::() { return Err(seg); @@ -223,41 +214,41 @@ impl TryFrom for Segment { #[cfg(debug_assertions)] { for paddr in seg.range.clone().step_by(PAGE_SIZE) { - let frame = unsafe { Frame::::from_raw(paddr) }; + let frame = unsafe { Frame::::from_raw(paddr) }; let frame = ManuallyDrop::new(frame); debug_assert!((frame.dyn_meta() as &dyn core::any::Any).is::()); } } // SAFETY: The metadata is coerceable and the struct is transmutable. - Ok(unsafe { core::mem::transmute::>(seg) }) + Ok(unsafe { core::mem::transmute::, Segment>(seg) }) } } -impl From> for DynUSegment { +impl From> for USegment { fn from(seg: Segment) -> Self { // SAFETY: The metadata is coerceable and the struct is transmutable. unsafe { core::mem::transmute(seg) } } } -impl From<&Segment> for &DynUSegment { +impl From<&Segment> for &USegment { fn from(seg: &Segment) -> Self { // SAFETY: The metadata is coerceable and the struct is transmutable. unsafe { core::mem::transmute(seg) } } } -impl TryFrom for DynUSegment { - type Error = DynSegment; +impl TryFrom> for USegment { + type Error = Segment; - /// Try converting a [`DynSegment`] into [`DynUSegment`]. + /// Try converting a [`Segment`] into [`USegment`]. /// /// If the usage of the page is not the same as the expected usage, it will /// return the dynamic page itself as is. - fn try_from(seg: DynSegment) -> core::result::Result { + fn try_from(seg: Segment) -> core::result::Result { // SAFETY: for each page there would be a forgotten handle // when creating the `Segment` object. - let first_frame = unsafe { Frame::::from_raw(seg.range.start) }; + let first_frame = unsafe { Frame::::from_raw(seg.range.start) }; let first_frame = ManuallyDrop::new(first_frame); if !first_frame.dyn_meta().is_untyped() { return Err(seg); @@ -267,12 +258,12 @@ impl TryFrom for DynUSegment { #[cfg(debug_assertions)] { for paddr in seg.range.clone().step_by(PAGE_SIZE) { - let frame = unsafe { Frame::::from_raw(paddr) }; + let frame = unsafe { Frame::::from_raw(paddr) }; let frame = ManuallyDrop::new(frame); debug_assert!(frame.dyn_meta().is_untyped()); } } // SAFETY: The metadata is coerceable and the struct is transmutable. - Ok(unsafe { core::mem::transmute::(seg) }) + Ok(unsafe { core::mem::transmute::, USegment>(seg) }) } } diff --git a/ostd/src/mm/frame/untyped.rs b/ostd/src/mm/frame/untyped.rs index 1ecb860ff..6ce4e1bca 100644 --- a/ostd/src/mm/frame/untyped.rs +++ b/ostd/src/mm/frame/untyped.rs @@ -2,13 +2,12 @@ //! Untyped physical memory management. //! -//! A frame is a special page that is _untyped_ memory. -//! It is used to store data irrelevant to the integrity of the kernel. -//! All pages mapped to the virtual address space of the users are backed by -//! frames. 
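Since a segment is also an iterator over its frames, consuming one looks like this sketch:

```rust
use ostd::mm::{frame::meta::AnyFrameMeta, Frame, Segment, PAGE_SIZE};

// Ownership moves out one page at a time; the segment shrinks from the
// front until it is empty.
fn scatter<M: AnyFrameMeta>(seg: Segment<M>) {
    for frame in seg {
        let frame: Frame<M> = frame;
        assert_eq!(frame.size(), PAGE_SIZE);
    }
}
```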
Frames, with all the properties of pages, can additionally be safely
-//! read and written by the kernel or the user.
+//! As detailed in [`crate::mm::frame`], untyped memory can be accessed with
+//! relaxed rules but we cannot create references to it. This module provides
+//! the declaration of untyped frames and segments, and the implementation of
+//! extra functionalities (such as [`VmIo`]) for them.
 
-use super::{meta::FrameMeta, Frame, Segment};
+use super::{meta::AnyFrameMeta, Frame, Segment};
 use crate::{
     mm::{
         io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
@@ -19,24 +18,25 @@ use crate::{
 
 /// The metadata of untyped frame.
 ///
-/// If a structure `M` implements [`UFrameMeta`], it can be used as the
+/// If a structure `M` implements [`AnyUFrameMeta`], it can be used as the
 /// metadata of a type of untyped frames [`Frame`]. All frames of such type
 /// will be accessible as untyped memory.
-pub trait UFrameMeta: FrameMeta {}
+pub trait AnyUFrameMeta: AnyFrameMeta {}
 
-/// An untyped frame with any metadata.
+/// A smart pointer to an untyped frame with any metadata.
+///
+/// The metadata of the frame is not known at compile time but the frame must
+/// be an untyped one. A [`UFrame`] as a parameter accepts any type of
+/// untyped frame metadata.
 ///
 /// The usage of this frame will not be changed while this object is alive.
-/// The metadata of the frame is not known at compile time but the frame must
-/// be an untyped one. An [`DynUFrame`] as a parameter accepts any type of
-/// untyped frame metadata.
-pub type DynUFrame = Frame;
+pub type UFrame = Frame;
 
 /// Makes a structure usable as untyped frame metadata.
 ///
-/// Directly implementing [`FrameMeta`] is not safe since the size and
+/// Directly implementing [`AnyFrameMeta`] is not safe since the size and
 /// alignment must be checked. This macro provides a safe way to implement both
-/// [`FrameMeta`] and [`UFrameMeta`] with compile-time checks.
+/// [`AnyFrameMeta`] and [`AnyUFrameMeta`] with compile-time checks.
 ///
 /// If this macro is used for built-in typed frame metadata, it won't compile.
 #[macro_export]
@@ -44,25 +44,25 @@ macro_rules! impl_untyped_frame_meta_for {
     // Implement without specifying the drop behavior.
     ($t:ty) => {
         use static_assertions::const_assert;
-        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
-        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN);
         // SAFETY: The size and alignment of the structure are checked.
-        unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
+        unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {
             fn is_untyped(&self) -> bool {
                 true
             }
         }
-        impl $crate::mm::frame::untyped::UFrameMeta for $t {}
+        impl $crate::mm::frame::untyped::AnyUFrameMeta for $t {}
     };
     // Implement with a customized drop function.
     ($t:ty, $body:expr) => {
         use static_assertions::const_assert;
-        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
-        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN);
         // SAFETY: The size and alignment of the structure are checked.
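A sketch of the macro's second arm (the `CountedMeta` type is hypothetical, and the hook body here deliberately ignores the `VmReader` over the dying frame's bytes):

```rust
use core::sync::atomic::{AtomicUsize, Ordering};

static FREED_FRAMES: AtomicUsize = AtomicUsize::new(0);

#[derive(Debug)]
struct CountedMeta;

// The second arm installs a drop hook: the given expression runs in
// `on_drop`, i.e., when the last handle to such a frame goes away.
ostd::impl_untyped_frame_meta_for!(CountedMeta, {
    FREED_FRAMES.fetch_add(1, Ordering::Relaxed);
});
```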
// Outside OSTD the user cannot implement a `on_drop` method for typed // frames. And untyped frames can be safely read. - unsafe impl $crate::mm::frame::meta::FrameMeta for $t { + unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t { fn on_drop(&mut self, reader: &mut $crate::mm::VmReader<$crate::mm::Infallible>) { $body } @@ -71,7 +71,7 @@ macro_rules! impl_untyped_frame_meta_for { true } } - impl $crate::mm::frame::untyped::UFrameMeta for $t {} + impl $crate::mm::frame::untyped::AnyUFrameMeta for $t {} }; } @@ -91,7 +91,7 @@ pub trait UntypedMem { macro_rules! impl_untyped_for { ($t:ident) => { - impl UntypedMem for $t { + impl UntypedMem for $t { fn reader(&self) -> VmReader<'_, Infallible> { let ptr = paddr_to_vaddr(self.start_paddr()) as *const u8; // SAFETY: Only untyped frames are allowed to be read. @@ -105,7 +105,7 @@ macro_rules! impl_untyped_for { } } - impl VmIo for $t { + impl VmIo for $t { fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { let read_len = writer.avail().min(self.size().saturating_sub(offset)); // Do bound check with potential integer overflow in mind @@ -151,12 +151,12 @@ use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref}; /// `FrameRef` is a struct that can work as `&'a Frame`. /// /// This is solely useful for [`crate::collections::xarray`]. -pub struct FrameRef<'a, M: UFrameMeta + ?Sized> { +pub struct FrameRef<'a, M: AnyUFrameMeta + ?Sized> { inner: ManuallyDrop>, _marker: PhantomData<&'a Frame>, } -impl Deref for FrameRef<'_, M> { +impl Deref for FrameRef<'_, M> { type Target = Frame; fn deref(&self) -> &Self::Target { @@ -166,7 +166,7 @@ impl Deref for FrameRef<'_, M> { // SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer. // The pointer is also aligned to 4. -unsafe impl xarray::ItemEntry for Frame { +unsafe impl xarray::ItemEntry for Frame { type Ref<'a> = FrameRef<'a, M> where diff --git a/ostd/src/mm/heap_allocator/mod.rs b/ostd/src/mm/heap_allocator/mod.rs index c42beaa08..0d71137de 100644 --- a/ostd/src/mm/heap_allocator/mod.rs +++ b/ostd/src/mm/heap_allocator/mod.rs @@ -11,7 +11,7 @@ use spin::Once; use super::paddr_to_vaddr; use crate::{ - mm::{frame::allocator::PAGE_ALLOCATOR, PAGE_SIZE}, + mm::{frame::allocator::FRAME_ALLOCATOR, PAGE_SIZE}, prelude::*, sync::SpinLock, trap::disable_local, @@ -94,7 +94,7 @@ impl LockedHeapWithRescue { }; let allocation_start = { - let mut page_allocator = PAGE_ALLOCATOR.get().unwrap().lock(); + let mut page_allocator = FRAME_ALLOCATOR.get().unwrap().lock(); if num_frames >= MIN_NUM_FRAMES { page_allocator.alloc(num_frames).ok_or(Error::NoMemory)? } else { diff --git a/ostd/src/mm/io.rs b/ostd/src/mm/io.rs index 23083ddb7..62b729fb4 100644 --- a/ostd/src/mm/io.rs +++ b/ostd/src/mm/io.rs @@ -7,11 +7,11 @@ //! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and //! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_. //! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory -//! (e.g., `&[u8]`) or untyped memory (e.g, [`DynUFrame`]). Behind the scene, `VmReader` and `VmWriter` +//! (e.g., `&[u8]`) or untyped memory (e.g, [`UFrame`]). Behind the scene, `VmReader` and `VmWriter` //! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose //! safety depends on whether the given memory regions are _valid_ or not. //! -//! [`DynUFrame`]: crate::mm::DynUFrame +//! [`UFrame`]: crate::mm::UFrame //! 
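With these blanket impls, any untyped frame or segment supports byte-granular I/O; a sketch (`read_val`/`write_val` are `VmIo`'s provided helpers, assumed unchanged by this patch):

```rust
use ostd::mm::{UFrame, UntypedMem, VmIo};

fn roundtrip(frame: &UFrame) -> ostd::Result<()> {
    // `VmIo`: offset-based, bounds-checked access.
    frame.write_val(0, &0xdead_beef_u32)?;
    let val: u32 = frame.read_val(0)?;
    assert_eq!(val, 0xdead_beef_u32);
    // `UntypedMem`: an infallible reader over the frame's bytes.
    assert_eq!(frame.reader().remain(), frame.size());
    Ok(())
}
```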
[`from_user_space`]: `VmReader::from_user_space` //! [`from_kernel_space`]: `VmReader::from_kernel_space` //! @@ -58,7 +58,7 @@ use crate::{ }; /// A trait that enables reading/writing data from/to a VM object, -/// e.g., [`DynUSegment`], [`Vec`] and [`DynUFrame`]. +/// e.g., [`USegment`], [`Vec`] and [`UFrame`]. /// /// # Concurrency /// @@ -67,8 +67,8 @@ use crate::{ /// desire predictability or atomicity, the users should add extra mechanism /// for such properties. /// -/// [`DynUSegment`]: crate::mm::DynUSegment -/// [`DynUFrame`]: crate::mm::DynUFrame +/// [`USegment`]: crate::mm::USegment +/// [`UFrame`]: crate::mm::UFrame pub trait VmIo: Send + Sync { /// Reads requested data at a specified offset into a given `VmWriter`. /// diff --git a/ostd/src/mm/kspace/kvirt_area.rs b/ostd/src/mm/kspace/kvirt_area.rs index 338d35bb3..d12c8ea54 100644 --- a/ostd/src/mm/kspace/kvirt_area.rs +++ b/ostd/src/mm/kspace/kvirt_area.rs @@ -11,7 +11,7 @@ use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE}; use crate::{ cpu::CpuSet, mm::{ - frame::{meta::FrameMeta, Frame}, + frame::{meta::AnyFrameMeta, Frame}, page_prop::PageProperty, page_table::PageTableItem, tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD}, @@ -204,7 +204,7 @@ impl KVirtArea { impl KVirtArea { /// Maps pages into the kernel virtual area. - pub fn map_pages( + pub fn map_pages( &mut self, range: Range, pages: impl Iterator>, @@ -232,7 +232,7 @@ impl KVirtArea { /// /// This function returns None if the address is not mapped (`NotMapped`), /// while panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page. - pub fn get_page(&self, addr: Vaddr) -> Option> { + pub fn get_page(&self, addr: Vaddr) -> Option> { let query_result = self.query_page(addr); match query_result { PageTableItem::Mapped { diff --git a/ostd/src/mm/kspace/mod.rs b/ostd/src/mm/kspace/mod.rs index 197e1ee66..ce38ef45d 100644 --- a/ostd/src/mm/kspace/mod.rs +++ b/ostd/src/mm/kspace/mod.rs @@ -163,7 +163,7 @@ pub fn init_kernel_page_table(meta_pages: Segment) { // Map the metadata pages. 
{ - let start_va = mapping::page_to_meta::(0); + let start_va = mapping::frame_to_meta::(0); let from = start_va..start_va + meta_pages.size(); let prop = PageProperty { flags: PageFlags::RW, diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs index 638a6b942..1e5b17f63 100644 --- a/ostd/src/mm/mod.rs +++ b/ostd/src/mm/mod.rs @@ -26,9 +26,9 @@ pub use self::{ dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr}, frame::{ allocator::FrameAllocOptions, - segment::{DynSegment, DynUSegment, Segment}, - untyped::{DynUFrame, UFrameMeta, UntypedMem}, - DynFrame, Frame, + segment::{Segment, USegment}, + untyped::{AnyUFrameMeta, UFrame, UntypedMem}, + Frame, }, io::{ Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader, diff --git a/ostd/src/mm/page_table/boot_pt.rs b/ostd/src/mm/page_table/boot_pt.rs index fe1710579..3b670f52b 100644 --- a/ostd/src/mm/page_table/boot_pt.rs +++ b/ostd/src/mm/page_table/boot_pt.rs @@ -16,8 +16,8 @@ use crate::{ cpu::num_cpus, cpu_local_cell, mm::{ - frame::allocator::PAGE_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageProperty, - PagingConstsTrait, Vaddr, PAGE_SIZE, + frame::allocator::FRAME_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, + PageProperty, PagingConstsTrait, Vaddr, PAGE_SIZE, }, sync::SpinLock, }; @@ -221,7 +221,7 @@ impl BootPageTable { } fn alloc_frame(&mut self) -> FrameNumber { - let frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap(); + let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap(); self.frames.push(frame); // Zero it out. let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8; @@ -233,7 +233,7 @@ impl BootPageTable { impl Drop for BootPageTable { fn drop(&mut self) { for frame in &self.frames { - PAGE_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1); + FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1); } } } diff --git a/ostd/src/mm/page_table/cursor.rs b/ostd/src/mm/page_table/cursor.rs index ebed0e60e..a0a73e1d0 100644 --- a/ostd/src/mm/page_table/cursor.rs +++ b/ostd/src/mm/page_table/cursor.rs @@ -76,7 +76,7 @@ use super::{ }; use crate::{ mm::{ - frame::{meta::FrameMeta, Frame}, + frame::{meta::AnyFrameMeta, Frame}, kspace::should_map_as_tracked, paddr_to_vaddr, Paddr, PageProperty, Vaddr, }, @@ -91,7 +91,7 @@ pub enum PageTableItem { }, Mapped { va: Vaddr, - page: Frame, + page: Frame, prop: PageProperty, }, #[allow(dead_code)] @@ -402,9 +402,9 @@ where self.0.query() } - /// Maps the range starting from the current address to a [`Frame`]. + /// Maps the range starting from the current address to a [`Frame`]. /// - /// It returns the previously mapped [`Frame`] if that exists. + /// It returns the previously mapped [`Frame`] if that exists. /// /// # Panics /// @@ -419,9 +419,9 @@ where /// not affect kernel's memory safety. 
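The renamed allocator keeps its frame-number interface; a crate-internal sketch mirroring the `alloc`/`dealloc` calls visible in the hunks above:

```rust
// Inside OSTD (`FRAME_ALLOCATOR` is crate-internal): `alloc` takes a frame
// count and returns the first frame number; `dealloc` mirrors it.
fn with_one_frame() {
    let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
    let paddr = frame * PAGE_SIZE; // frame number -> physical address
    let _ = paddr;
    FRAME_ALLOCATOR.get().unwrap().lock().dealloc(frame, 1);
}
```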
pub unsafe fn map( &mut self, - page: Frame, + page: Frame, prop: PageProperty, - ) -> Option> { + ) -> Option> { let end = self.0.va + page.size(); assert!(end <= self.0.barrier_va.end); diff --git a/ostd/src/mm/page_table/node/child.rs b/ostd/src/mm/page_table/node/child.rs index 2d046731f..54a536019 100644 --- a/ostd/src/mm/page_table/node/child.rs +++ b/ostd/src/mm/page_table/node/child.rs @@ -8,7 +8,7 @@ use super::{MapTrackingStatus, PageTableEntryTrait, RawPageTableNode}; use crate::{ arch::mm::{PageTableEntry, PagingConsts}, mm::{ - frame::{inc_page_ref_count, meta::FrameMeta, Frame}, + frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame}, page_prop::PageProperty, Paddr, PagingConstsTrait, PagingLevel, }, @@ -27,7 +27,7 @@ pub(in crate::mm) enum Child< [(); C::NR_LEVELS as usize]:, { PageTable(RawPageTableNode), - Frame(Frame, PageProperty), + Frame(Frame, PageProperty), /// Pages not tracked by handles. Untracked(Paddr, PagingLevel, PageProperty), None, @@ -119,7 +119,7 @@ where match is_tracked { MapTrackingStatus::Tracked => { // SAFETY: The physical address points to a valid page. - let page = unsafe { Frame::::from_raw(paddr) }; + let page = unsafe { Frame::::from_raw(paddr) }; Child::Frame(page, pte.prop()) } MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()), @@ -150,7 +150,7 @@ where if !pte.is_last(level) { // SAFETY: The physical address is valid and the PTE already owns // the reference to the page. - unsafe { inc_page_ref_count(paddr) }; + unsafe { inc_frame_ref_count(paddr) }; // SAFETY: The physical address points to a valid page table node // at the given level. return Child::PageTable(unsafe { RawPageTableNode::from_raw_parts(paddr, level - 1) }); @@ -160,9 +160,9 @@ where MapTrackingStatus::Tracked => { // SAFETY: The physical address is valid and the PTE already owns // the reference to the page. - unsafe { inc_page_ref_count(paddr) }; + unsafe { inc_frame_ref_count(paddr) }; // SAFETY: The physical address points to a valid page. - let page = unsafe { Frame::::from_raw(paddr) }; + let page = unsafe { Frame::::from_raw(paddr) }; Child::Frame(page, pte.prop()) } MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()), diff --git a/ostd/src/mm/page_table/node/mod.rs b/ostd/src/mm/page_table/node/mod.rs index 0439c263b..0f78e80fa 100644 --- a/ostd/src/mm/page_table/node/mod.rs +++ b/ostd/src/mm/page_table/node/mod.rs @@ -40,7 +40,7 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait}; use crate::{ arch::mm::{PageTableEntry, PagingConsts}, mm::{ - frame::{inc_page_ref_count, meta::FrameMeta, Frame}, + frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame}, paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel, VmReader, }, @@ -166,7 +166,7 @@ where // SAFETY: We have a reference count to the page and can safely increase the reference // count by one more. unsafe { - inc_page_ref_count(self.paddr()); + inc_frame_ref_count(self.paddr()); } } @@ -405,7 +405,7 @@ where // SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for // all possible generic parameters. And the layout fits the requirements. -unsafe impl FrameMeta for PageTablePageMeta +unsafe impl AnyFrameMeta for PageTablePageMeta where [(); C::NR_LEVELS as usize]:, { @@ -434,7 +434,7 @@ where } else if is_tracked == MapTrackingStatus::Tracked { // SAFETY: The PTE points to a tracked page. The ownership // of the child is transferred to the child then dropped. 
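After the renaming, tracked mappings surface as `Frame<dyn AnyFrameMeta>`; a crate-internal sketch of consuming a cursor query result (variant fields as shown above, other variants elided):

```rust
// Inside OSTD: `PageTableItem` is what page-table cursors yield.
fn on_query(item: PageTableItem) {
    match item {
        PageTableItem::Mapped { va, page, prop } => {
            // `page` is a `Frame<dyn AnyFrameMeta>` (formerly `DynFrame`).
            let _ = (va, page.start_paddr(), prop);
        }
        _ => { /* not mapped, or an untracked physical range */ }
    }
}
```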
- drop(unsafe { Frame::::from_raw(paddr) }); + drop(unsafe { Frame::::from_raw(paddr) }); } } } diff --git a/ostd/src/mm/stat/mod.rs b/ostd/src/mm/stat/mod.rs index afc8105b9..594486f3e 100644 --- a/ostd/src/mm/stat/mod.rs +++ b/ostd/src/mm/stat/mod.rs @@ -2,7 +2,7 @@ //! APIs for memory statistics. -use crate::mm::frame::allocator::PAGE_ALLOCATOR; +use crate::mm::frame::allocator::FRAME_ALLOCATOR; /// Total memory available for any usages in the system (in bytes). /// @@ -10,12 +10,12 @@ use crate::mm::frame::allocator::PAGE_ALLOCATOR; /// in most occasions. For example, bad memory, kernel statically-allocated /// memory or firmware reserved memories do not count. pub fn mem_total() -> usize { - PAGE_ALLOCATOR.get().unwrap().lock().mem_total() + FRAME_ALLOCATOR.get().unwrap().lock().mem_total() } /// Current readily available memory (in bytes). /// /// Such memory can be directly used for allocation without reclaiming. pub fn mem_available() -> usize { - PAGE_ALLOCATOR.get().unwrap().lock().mem_available() + FRAME_ALLOCATOR.get().unwrap().lock().mem_available() } diff --git a/ostd/src/mm/tlb.rs b/ostd/src/mm/tlb.rs index 1cc386fdc..489201cea 100644 --- a/ostd/src/mm/tlb.rs +++ b/ostd/src/mm/tlb.rs @@ -6,7 +6,7 @@ use alloc::vec::Vec; use core::ops::Range; use super::{ - frame::{meta::FrameMeta, Frame}, + frame::{meta::AnyFrameMeta, Frame}, Vaddr, PAGE_SIZE, }; use crate::{ @@ -80,7 +80,7 @@ impl TlbFlusher { /// flushed. Otherwise if the page is recycled for other purposes, the user /// space program can still access the page through the TLB entries. This /// method is designed to be used in such cases. - pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: Frame) { + pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: Frame) { self.issue_tlb_flush_(op, Some(drop_after_flush)); } @@ -94,7 +94,7 @@ impl TlbFlusher { self.need_self_flush } - fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option>) { + fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option>) { let op = op.optimize_for_large_range(); // Fast path for single CPU cases. @@ -159,7 +159,7 @@ impl TlbFlushOp { // Lock ordering: lock FLUSH_OPS before PAGE_KEEPER. cpu_local! { static FLUSH_OPS: SpinLock = SpinLock::new(OpsStack::new()); - static PAGE_KEEPER: SpinLock>, LocalIrqDisabled> = SpinLock::new(Vec::new()); + static PAGE_KEEPER: SpinLock>, LocalIrqDisabled> = SpinLock::new(Vec::new()); } fn do_remote_flush() { diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs index e14c901e2..8dadd41e0 100644 --- a/ostd/src/mm/vm_space.rs +++ b/ostd/src/mm/vm_space.rs @@ -22,7 +22,7 @@ use crate::{ kspace::KERNEL_PAGE_TABLE, page_table::{self, PageTable, PageTableItem, UserMode}, tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD}, - DynUFrame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR, + PageProperty, UFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR, }, prelude::*, sync::{PreemptDisabled, RwLock, RwLockReadGuard}, @@ -40,7 +40,7 @@ use crate::{ /// /// A newly-created `VmSpace` is not backed by any physical memory pages. To /// provide memory pages for a `VmSpace`, one can allocate and map physical -/// memory ([`DynUFrame`]s) to the `VmSpace` using the cursor. +/// memory ([`UFrame`]s) to the `VmSpace` using the cursor. /// /// A `VmSpace` can also attach a page fault handler, which will be invoked to /// handle page faults generated from user space. @@ -323,7 +323,7 @@ impl CursorMut<'_, '_> { /// Map a frame into the current slot. 
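Usage of the statistics API is unchanged by the internal rename; a sketch (assuming `stat` stays publicly reachable as `ostd::mm::stat`):

```rust
fn report() {
    let total = ostd::mm::stat::mem_total();
    let avail = ostd::mm::stat::mem_available();
    // Available memory can be allocated without any reclamation.
    assert!(avail <= total);
}
```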
/// /// This method will bring the cursor to the next slot after the modification. - pub fn map(&mut self, frame: DynUFrame, prop: PageProperty) { + pub fn map(&mut self, frame: UFrame, prop: PageProperty) { let start_va = self.virt_addr(); // SAFETY: It is safe to map untyped memory into the userspace. let old = unsafe { self.pt_cursor.map(frame.into(), prop) }; @@ -475,7 +475,7 @@ pub enum VmItem { /// The virtual address of the slot. va: Vaddr, /// The mapped frame. - frame: DynUFrame, + frame: UFrame, /// The property of the slot. prop: PageProperty, },
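Tying the user-facing pieces together, mapping one untyped frame into a `VmSpace` reads like this sketch (`PageProperty::new` and the flag/cache types are assumed from OSTD; error handling elided):

```rust
use ostd::mm::{CachePolicy, PageFlags, PageProperty, UFrame, VmSpace, PAGE_SIZE};

// The frame may come from any allocation path that yields untyped memory.
fn map_one(vm_space: &VmSpace, va: usize, frame: UFrame) {
    let mut cursor = vm_space.cursor_mut(&(va..va + PAGE_SIZE)).unwrap();
    // Mapping untyped memory into user space is safe; per the doc above,
    // the cursor advances to the next slot after the modification.
    cursor.map(frame, PageProperty::new(PageFlags::RW, CachePolicy::Writeback));
}
```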