diff --git a/kernel/comps/block/src/bio.rs b/kernel/comps/block/src/bio.rs index 7540d3b42..181ec16cd 100644 --- a/kernel/comps/block/src/bio.rs +++ b/kernel/comps/block/src/bio.rs @@ -5,8 +5,8 @@ use bitvec::array::BitArray; use int_to_c_enum::TryFromInt; use ostd::{ mm::{ - DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, Segment, VmIo, - VmReader, VmWriter, + DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, UntypedSegment, + VmIo, VmReader, VmWriter, }, sync::{SpinLock, WaitQueue}, Error, @@ -442,8 +442,8 @@ impl<'a> BioSegment { } } - /// Constructs a new `BioSegment` with a given `Segment` and the bio direction. - pub fn new_from_segment(segment: Segment, direction: BioDirection) -> Self { + /// Constructs a new `BioSegment` with a given `UntypedSegment` and the bio direction. + pub fn new_from_segment(segment: UntypedSegment, direction: BioDirection) -> Self { let len = segment.nbytes(); let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap(); Self { @@ -481,7 +481,7 @@ impl<'a> BioSegment { /// Returns the inner VM segment. #[cfg(ktest)] - pub fn inner_segment(&self) -> &Segment { + pub fn inner_segment(&self) -> &UntypedSegment { self.inner.dma_slice.stream().vm_segment() } diff --git a/kernel/comps/virtio/src/queue.rs b/kernel/comps/virtio/src/queue.rs index 6ff136635..9bc92b16a 100644 --- a/kernel/comps/virtio/src/queue.rs +++ b/kernel/comps/virtio/src/queue.rs @@ -76,7 +76,7 @@ impl VirtQueue { } let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() { - // Currently, we use one Frame to place the descriptors and available rings, one Frame to place used rings + // Currently, we use one UntypedFrame to place the descriptors and available rings, one UntypedFrame to place used rings // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128. if size > 128 { return Err(QueueError::InvalidArgs); diff --git a/kernel/libs/aster-util/src/safe_ptr.rs b/kernel/libs/aster-util/src/safe_ptr.rs index 19b7605c5..cf7574a8b 100644 --- a/kernel/libs/aster-util/src/safe_ptr.rs +++ b/kernel/libs/aster-util/src/safe_ptr.rs @@ -54,7 +54,7 @@ use ostd::{ /// /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo` /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and -/// `Frame`. The blanket implementations of `VmIo` also include pointer-like +/// `UntypedFrame`. The blanket implementations of `VmIo` also include pointer-like /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box`, /// and `Arc`. /// diff --git a/kernel/libs/aster-util/src/segment_slice.rs b/kernel/libs/aster-util/src/segment_slice.rs index 5589fa60d..f72ea90cd 100644 --- a/kernel/libs/aster-util/src/segment_slice.rs +++ b/kernel/libs/aster-util/src/segment_slice.rs @@ -2,41 +2,41 @@ // SPDX-License-Identifier: MPL-2.0 -//! Provides [`SegmentSlice`] for quick duplication and slicing over [`Segment`]. +//! Provides [`SegmentSlice`] for quick duplication and slicing over [`UntypedSegment`]. use alloc::sync::Arc; use core::ops::Range; use ostd::{ mm::{ - FallibleVmRead, FallibleVmWrite, Frame, Infallible, Paddr, Segment, VmIo, VmReader, - VmWriter, PAGE_SIZE, + FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedFrame, UntypedSegment, VmIo, + VmReader, VmWriter, PAGE_SIZE, }, Error, Result, }; -/// A reference to a slice of a [`Segment`]. +/// A reference to a slice of a [`UntypedSegment`]. 
/// /// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference -/// count. While cloning a [`Segment`] will increment the reference count of +/// count. While cloning a [`UntypedSegment`] will increment the reference count of /// many underlying pages. /// /// The downside is that the [`SegmentSlice`] requires heap allocation. Also, -/// if any [`SegmentSlice`] of the original [`Segment`] is alive, all pages in -/// the original [`Segment`], including the pages that are not referenced, will +/// if any [`SegmentSlice`] of the original [`UntypedSegment`] is alive, all pages in +/// the original [`UntypedSegment`], including the pages that are not referenced, will /// not be freed. #[derive(Debug, Clone)] pub struct SegmentSlice { - inner: Arc, + inner: Arc, range: Range, } impl SegmentSlice { - /// Returns a part of the `Segment`. + /// Returns a part of the `UntypedSegment`. /// /// # Panics /// - /// If `range` is not within the range of this `Segment`, + /// If `range` is not within the range of this `UntypedSegment`, /// then the method panics. pub fn range(&self, range: Range) -> Self { let orig_range = &self.range; @@ -124,8 +124,8 @@ impl VmIo for SegmentSlice { } } -impl From for SegmentSlice { - fn from(segment: Segment) -> Self { +impl From for SegmentSlice { + fn from(segment: UntypedSegment) -> Self { let range = 0..segment.nbytes() / PAGE_SIZE; Self { inner: Arc::new(segment), @@ -134,7 +134,7 @@ impl From for SegmentSlice { } } -impl From for Segment { +impl From for UntypedSegment { fn from(slice: SegmentSlice) -> Self { let start = slice.range.start * PAGE_SIZE; let end = slice.range.end * PAGE_SIZE; @@ -142,8 +142,8 @@ impl From for Segment { } } -impl From for SegmentSlice { - fn from(frame: Frame) -> Self { - SegmentSlice::from(Segment::from(frame)) +impl From for SegmentSlice { + fn from(frame: UntypedFrame) -> Self { + SegmentSlice::from(UntypedSegment::from(frame)) } } diff --git a/kernel/src/fs/exfat/fs.rs b/kernel/src/fs/exfat/fs.rs index 3af146538..26e3ab840 100644 --- a/kernel/src/fs/exfat/fs.rs +++ b/kernel/src/fs/exfat/fs.rs @@ -12,7 +12,7 @@ use aster_block::{ }; use hashbrown::HashMap; use lru::LruCache; -use ostd::mm::Frame; +use ostd::mm::UntypedFrame; pub(super) use ostd::mm::VmIo; use super::{ @@ -368,7 +368,7 @@ impl ExfatFS { } impl PageCacheBackend for ExfatFS { - fn read_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { if self.fs_size() < idx * PAGE_SIZE { return_errno_with_message!(Errno::EINVAL, "invalid read size") } @@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS { Ok(waiter) } - fn write_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { if self.fs_size() < idx * PAGE_SIZE { return_errno_with_message!(Errno::EINVAL, "invalid write size") } diff --git a/kernel/src/fs/exfat/inode.rs b/kernel/src/fs/exfat/inode.rs index a01f094ec..c72c22b0c 100644 --- a/kernel/src/fs/exfat/inode.rs +++ b/kernel/src/fs/exfat/inode.rs @@ -13,7 +13,7 @@ use aster_block::{ BLOCK_SIZE, }; use aster_rights::Full; -use ostd::mm::{Frame, VmIo}; +use ostd::mm::{UntypedFrame, VmIo}; use super::{ constants::*, @@ -135,7 +135,7 @@ struct ExfatInodeInner { } impl PageCacheBackend for ExfatInode { - fn read_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { let inner = self.inner.read(); if inner.size < idx * PAGE_SIZE 
{ return_errno_with_message!(Errno::EINVAL, "Invalid read size") @@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode { Ok(waiter) } - fn write_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { let inner = self.inner.read(); let sector_size = inner.fs().sector_size(); diff --git a/kernel/src/fs/exfat/mod.rs b/kernel/src/fs/exfat/mod.rs index 6440b85b9..03d46090a 100644 --- a/kernel/src/fs/exfat/mod.rs +++ b/kernel/src/fs/exfat/mod.rs @@ -22,7 +22,7 @@ mod test { BlockDevice, BlockDeviceMeta, }; use ostd::{ - mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE}, + mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE}, prelude::*, }; use rand::{rngs::SmallRng, RngCore, SeedableRng}; @@ -40,10 +40,10 @@ mod test { /// Followings are implementations of memory simulated block device pub const SECTOR_SIZE: usize = 512; - struct ExfatMemoryBioQueue(Segment); + struct ExfatMemoryBioQueue(UntypedSegment); impl ExfatMemoryBioQueue { - pub fn new(segment: Segment) -> Self { + pub fn new(segment: UntypedSegment) -> Self { ExfatMemoryBioQueue(segment) } @@ -57,7 +57,7 @@ mod test { } impl ExfatMemoryDisk { - pub fn new(segment: Segment) -> Self { + pub fn new(segment: UntypedSegment) -> Self { ExfatMemoryDisk { queue: ExfatMemoryBioQueue::new(segment), } @@ -111,7 +111,7 @@ mod test { static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../test/build/exfat.img"); /// Read exfat disk image - fn new_vm_segment_from_image() -> Segment { + fn new_vm_segment_from_image() -> UntypedSegment { let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE)) .uninit(true) .alloc_contiguous() diff --git a/kernel/src/fs/ext2/block_group.rs b/kernel/src/fs/ext2/block_group.rs index 8391967e8..9fd28ea9f 100644 --- a/kernel/src/fs/ext2/block_group.rs +++ b/kernel/src/fs/ext2/block_group.rs @@ -28,7 +28,7 @@ struct BlockGroupImpl { impl BlockGroup { /// Loads and constructs a block group. 
pub fn load( - group_descriptors_segment: &Segment, + group_descriptors_segment: &UntypedSegment, idx: usize, block_device: &dyn BlockDevice, super_block: &SuperBlock, @@ -318,7 +318,7 @@ impl Debug for BlockGroup { } impl PageCacheBackend for BlockGroupImpl { - fn read_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { let bid = self.inode_table_bid + idx as Ext2Bid; let bio_segment = BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice); @@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl { .read_blocks_async(bid, bio_segment) } - fn write_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { let bid = self.inode_table_bid + idx as Ext2Bid; let bio_segment = BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice); diff --git a/kernel/src/fs/ext2/fs.rs b/kernel/src/fs/ext2/fs.rs index 5026beaf5..036d66bd4 100644 --- a/kernel/src/fs/ext2/fs.rs +++ b/kernel/src/fs/ext2/fs.rs @@ -23,7 +23,7 @@ pub struct Ext2 { blocks_per_group: Ext2Bid, inode_size: usize, block_size: usize, - group_descriptors_segment: Segment, + group_descriptors_segment: UntypedSegment, self_ref: Weak, } @@ -63,7 +63,7 @@ impl Ext2 { // Load the block groups information let load_block_groups = |fs: Weak, block_device: &dyn BlockDevice, - group_descriptors_segment: &Segment| + group_descriptors_segment: &UntypedSegment| -> Result> { let block_groups_count = super_block.block_groups_count() as usize; let mut block_groups = Vec::with_capacity(block_groups_count); diff --git a/kernel/src/fs/ext2/indirect_block_cache.rs b/kernel/src/fs/ext2/indirect_block_cache.rs index 399ee15ce..51125febe 100644 --- a/kernel/src/fs/ext2/indirect_block_cache.rs +++ b/kernel/src/fs/ext2/indirect_block_cache.rs @@ -132,7 +132,7 @@ impl IndirectBlockCache { /// Represents a single indirect block buffer cached by the `IndirectCache`. #[derive(Clone, Debug)] pub struct IndirectBlock { - frame: Frame, + frame: UntypedFrame, state: State, } diff --git a/kernel/src/fs/ext2/inode.rs b/kernel/src/fs/ext2/inode.rs index 358c284dd..dae0202a9 100644 --- a/kernel/src/fs/ext2/inode.rs +++ b/kernel/src/fs/ext2/inode.rs @@ -1733,7 +1733,7 @@ impl InodeImpl { writer: &mut VmWriter, ) -> Result; pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>; - pub fn read_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result; + pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result; pub fn write_blocks_async( &self, bid: Ext2Bid, @@ -1741,7 +1741,7 @@ impl InodeImpl { reader: &mut VmReader, ) -> Result; pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>; - pub fn write_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result; + pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result; } /// Manages the inode blocks and block I/O operations. @@ -1789,7 +1789,7 @@ impl InodeBlockManager { } } - pub fn read_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result { + pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result { let mut bio_waiter = BioWaiter::new(); for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? 
{ @@ -1834,7 +1834,7 @@ impl InodeBlockManager { } } - pub fn write_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result { + pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result { let mut bio_waiter = BioWaiter::new(); for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? { @@ -1858,12 +1858,12 @@ impl InodeBlockManager { } impl PageCacheBackend for InodeBlockManager { - fn read_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { let bid = idx as Ext2Bid; self.read_block_async(bid, frame) } - fn write_page_async(&self, idx: usize, frame: &Frame) -> Result { + fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result { let bid = idx as Ext2Bid; self.write_block_async(bid, frame) } diff --git a/kernel/src/fs/ext2/prelude.rs b/kernel/src/fs/ext2/prelude.rs index 015bae3de..0ec1f3d8a 100644 --- a/kernel/src/fs/ext2/prelude.rs +++ b/kernel/src/fs/ext2/prelude.rs @@ -13,7 +13,7 @@ pub(super) use aster_block::{ }; pub(super) use aster_rights::Full; pub(super) use ostd::{ - mm::{Frame, FrameAllocOptions, Segment, VmIo}, + mm::{FrameAllocOptions, UntypedFrame, UntypedSegment, VmIo}, sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard}, }; pub(super) use static_assertions::const_assert; diff --git a/kernel/src/fs/ramfs/fs.rs b/kernel/src/fs/ramfs/fs.rs index a996cf459..5fcbd9ada 100644 --- a/kernel/src/fs/ramfs/fs.rs +++ b/kernel/src/fs/ramfs/fs.rs @@ -11,7 +11,7 @@ use aster_rights::Full; use aster_util::slot_vec::SlotVec; use hashbrown::HashMap; use ostd::{ - mm::{Frame, VmIo}, + mm::{UntypedFrame, VmIo}, sync::{PreemptDisabled, RwLockWriteGuard}, }; @@ -484,7 +484,7 @@ impl RamInode { } impl PageCacheBackend for RamInode { - fn read_page_async(&self, _idx: usize, frame: &Frame) -> Result { + fn read_page_async(&self, _idx: usize, frame: &UntypedFrame) -> Result { // Initially, any block/page in a RamFs inode contains all zeros frame .writer() @@ -494,7 +494,7 @@ impl PageCacheBackend for RamInode { Ok(BioWaiter::new()) } - fn write_page_async(&self, _idx: usize, _frame: &Frame) -> Result { + fn write_page_async(&self, _idx: usize, _frame: &UntypedFrame) -> Result { // do nothing Ok(BioWaiter::new()) } diff --git a/kernel/src/fs/utils/page_cache.rs b/kernel/src/fs/utils/page_cache.rs index 8c721c7d2..8f4ca844d 100644 --- a/kernel/src/fs/utils/page_cache.rs +++ b/kernel/src/fs/utils/page_cache.rs @@ -8,7 +8,7 @@ use align_ext::AlignExt; use aster_block::bio::{BioStatus, BioWaiter}; use aster_rights::Full; use lru::LruCache; -use ostd::mm::{Frame, FrameAllocOptions, VmIo}; +use ostd::mm::{FrameAllocOptions, UntypedFrame, VmIo}; use crate::{ prelude::*, @@ -381,7 +381,7 @@ impl PageCacheManager { Ok(()) } - fn ondemand_readahead(&self, idx: usize) -> Result { + fn ondemand_readahead(&self, idx: usize) -> Result { let mut pages = self.pages.lock(); let mut ra_state = self.ra_state.lock(); let backend = self.backend(); @@ -438,7 +438,7 @@ impl Debug for PageCacheManager { } impl Pager for PageCacheManager { - fn commit_page(&self, idx: usize) -> Result { + fn commit_page(&self, idx: usize) -> Result { self.ondemand_readahead(idx) } @@ -469,7 +469,7 @@ impl Pager for PageCacheManager { Ok(()) } - fn commit_overwrite(&self, idx: usize) -> Result { + fn commit_overwrite(&self, idx: usize) -> Result { if let Some(page) = self.pages.lock().get(&idx) { return Ok(page.frame.clone()); } @@ -481,7 +481,7 @@ impl Pager for PageCacheManager { #[derive(Debug)] struct Page { - 
frame: Frame, + frame: UntypedFrame, state: PageState, } @@ -502,7 +502,7 @@ impl Page { }) } - pub fn frame(&self) -> &Frame { + pub fn frame(&self) -> &UntypedFrame { &self.frame } @@ -531,16 +531,16 @@ enum PageState { /// This trait represents the backend for the page cache. pub trait PageCacheBackend: Sync + Send { /// Reads a page from the backend asynchronously. - fn read_page_async(&self, idx: usize, frame: &Frame) -> Result; + fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result; /// Writes a page to the backend asynchronously. - fn write_page_async(&self, idx: usize, frame: &Frame) -> Result; + fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result; /// Returns the number of pages in the backend. fn npages(&self) -> usize; } impl dyn PageCacheBackend { /// Reads a page from the backend synchronously. - fn read_page(&self, idx: usize, frame: &Frame) -> Result<()> { + fn read_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> { let waiter = self.read_page_async(idx, frame)?; match waiter.wait() { Some(BioStatus::Complete) => Ok(()), @@ -548,7 +548,7 @@ impl dyn PageCacheBackend { } } /// Writes a page to the backend synchronously. - fn write_page(&self, idx: usize, frame: &Frame) -> Result<()> { + fn write_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> { let waiter = self.write_page_async(idx, frame)?; match waiter.wait() { Some(BioStatus::Complete) => Ok(()), diff --git a/kernel/src/util/ring_buffer.rs b/kernel/src/util/ring_buffer.rs index e59f28212..4daf9ce2d 100644 --- a/kernel/src/util/ring_buffer.rs +++ b/kernel/src/util/ring_buffer.rs @@ -8,12 +8,12 @@ use core::{ use align_ext::AlignExt; use inherit_methods_macro::inherit_methods; -use ostd::mm::{FrameAllocOptions, Segment, VmIo}; +use ostd::mm::{FrameAllocOptions, UntypedSegment, VmIo}; use super::{MultiRead, MultiWrite}; use crate::prelude::*; -/// A lock-free SPSC FIFO ring buffer backed by a [`Segment`]. +/// A lock-free SPSC FIFO ring buffer backed by a [`UntypedSegment`]. /// /// The ring buffer supports `push`/`pop` any `T: Pod` items, also /// supports `write`/`read` any bytes data based on [`VmReader`]/[`VmWriter`]. @@ -46,7 +46,7 @@ use crate::prelude::*; /// } /// ``` pub struct RingBuffer { - segment: Segment, + segment: UntypedSegment, capacity: usize, tail: AtomicUsize, head: AtomicUsize, diff --git a/kernel/src/vdso.rs b/kernel/src/vdso.rs index d0c4d9bf6..fcfa8f07e 100644 --- a/kernel/src/vdso.rs +++ b/kernel/src/vdso.rs @@ -21,7 +21,7 @@ use aster_rights::Rights; use aster_time::{read_monotonic_time, Instant}; use aster_util::coeff::Coeff; use ostd::{ - mm::{Frame, VmIo, PAGE_SIZE}, + mm::{UntypedFrame, VmIo, PAGE_SIZE}, sync::SpinLock, Pod, }; @@ -199,9 +199,9 @@ struct Vdso { data: SpinLock, /// The VMO of the entire VDSO, including the library text and the VDSO data. vmo: Arc, - /// The `Frame` that contains the VDSO data. This frame is contained in and + /// The `UntypedFrame` that contains the VDSO data. This frame is contained in and /// will not be removed from the VDSO VMO. - data_frame: Frame, + data_frame: UntypedFrame, } /// A `SpinLock` for the `seq` field in `VdsoData`. 
diff --git a/kernel/src/vm/util.rs b/kernel/src/vm/util.rs index 68c445fe7..c5ccc5439 100644 --- a/kernel/src/vm/util.rs +++ b/kernel/src/vm/util.rs @@ -1,11 +1,11 @@ // SPDX-License-Identifier: MPL-2.0 -use ostd::mm::{Frame, FrameAllocOptions}; +use ostd::mm::{FrameAllocOptions, UntypedFrame}; use crate::prelude::*; -/// Creates a new `Frame` and initializes it with the contents of the `src`. -pub fn duplicate_frame(src: &Frame) -> Result { +/// Creates a new `UntypedFrame` and initializes it with the contents of the `src`. +pub fn duplicate_frame(src: &UntypedFrame) -> Result { let new_frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?; new_frame.copy_from(src); Ok(new_frame) diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs index 0939da861..4528888bb 100644 --- a/kernel/src/vm/vmar/vm_mapping.rs +++ b/kernel/src/vm/vmar/vm_mapping.rs @@ -8,8 +8,8 @@ use core::{ use align_ext::AlignExt; use ostd::mm::{ - tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags, - PageProperty, VmSpace, + tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty, + UntypedFrame, VmSpace, }; use super::interval_set::Interval; @@ -216,7 +216,7 @@ impl VmMapping { Ok(()) } - fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(Frame, bool)> { + fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UntypedFrame, bool)> { let mut is_readonly = false; let Some(vmo) = &self.vmo else { return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly)); @@ -264,7 +264,7 @@ impl VmMapping { let vm_perms = self.perms - VmPerms::WRITE; let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?; - let operate = move |commit_fn: &mut dyn FnMut() -> Result| { + let operate = move |commit_fn: &mut dyn FnMut() -> Result| { if let VmItem::NotMapped { .. } = cursor.query().unwrap() { // We regard all the surrounding pages as accessed, no matter // if it is really so. Then the hardware won't bother to update @@ -432,7 +432,7 @@ impl MappedVmo { /// /// If the VMO has not committed a frame at this index, it will commit /// one first and return it. - fn get_committed_frame(&self, page_offset: usize) -> Result { + fn get_committed_frame(&self, page_offset: usize) -> Result { debug_assert!(page_offset < self.range.len()); debug_assert!(page_offset % PAGE_SIZE == 0); self.vmo.commit_page(self.range.start + page_offset) @@ -444,7 +444,7 @@ impl MappedVmo { /// perform other operations. fn operate_on_range(&self, range: &Range, operate: F) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { debug_assert!(range.start < self.range.len()); debug_assert!(range.end <= self.range.len()); diff --git a/kernel/src/vm/vmo/dyn_cap.rs b/kernel/src/vm/vmo/dyn_cap.rs index 245657ff6..f38d31e5a 100644 --- a/kernel/src/vm/vmo/dyn_cap.rs +++ b/kernel/src/vm/vmo/dyn_cap.rs @@ -3,14 +3,14 @@ use core::ops::Range; use aster_rights::{Rights, TRights}; -use ostd::mm::{Frame, VmIo}; +use ostd::mm::{UntypedFrame, VmIo}; use super::{CommitFlags, Vmo, VmoRightsOp}; use crate::prelude::*; impl Vmo { /// Commits a page at specific offset - pub fn commit_page(&self, offset: usize) -> Result { + pub fn commit_page(&self, offset: usize) -> Result { self.check_rights(Rights::WRITE)?; self.0.commit_page(offset) } @@ -39,7 +39,7 @@ impl Vmo { /// perform other operations. 
pub(in crate::vm) fn operate_on_range(&self, range: &Range, operate: F) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { self.check_rights(Rights::WRITE)?; self.0 @@ -112,7 +112,7 @@ impl Vmo { /// # Access rights /// /// The method requires the Write right. - pub fn replace(&self, page: Frame, page_idx: usize) -> Result<()> { + pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> { self.check_rights(Rights::WRITE)?; self.0.replace(page, page_idx) } diff --git a/kernel/src/vm/vmo/mod.rs b/kernel/src/vm/vmo/mod.rs index 73b8aef77..0d5e8cf49 100644 --- a/kernel/src/vm/vmo/mod.rs +++ b/kernel/src/vm/vmo/mod.rs @@ -11,7 +11,7 @@ use align_ext::AlignExt; use aster_rights::Rights; use ostd::{ collections::xarray::{CursorMut, XArray}, - mm::{Frame, FrameAllocOptions, VmReader, VmWriter}, + mm::{FrameAllocOptions, UntypedFrame, VmReader, VmWriter}, }; use crate::prelude::*; @@ -66,8 +66,8 @@ pub use pager::Pager; /// # Implementation /// /// `Vmo` provides high-level APIs for address space management by wrapping -/// around its low-level counterpart [`ostd::mm::Frame`]. -/// Compared with `Frame`, +/// around its low-level counterpart [`ostd::mm::UntypedFrame`]. +/// Compared with `UntypedFrame`, /// `Vmo` is easier to use (by offering more powerful APIs) and /// harder to misuse (thanks to its nature of being capability). #[derive(Debug)] @@ -125,12 +125,12 @@ bitflags! { } } -/// `Pages` is the struct that manages the `Frame`s stored in `Vmo_`. +/// `Pages` is the struct that manages the `UntypedFrame`s stored in `Vmo_`. pub(super) enum Pages { /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size. - Nonresizable(Mutex>, usize), + Nonresizable(Mutex>, usize), /// `Pages` that can be resized and have a variable size. - Resizable(Mutex<(XArray, usize)>), + Resizable(Mutex<(XArray, usize)>), } impl Clone for Pages { @@ -149,7 +149,7 @@ impl Clone for Pages { impl Pages { fn with(&self, func: F) -> R where - F: FnOnce(&mut XArray, usize) -> R, + F: FnOnce(&mut XArray, usize) -> R, { match self { Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size), @@ -201,16 +201,16 @@ impl CommitFlags { } impl Vmo_ { - /// Prepares a new `Frame` for the target index in pages, returns this new frame. - fn prepare_page(&self, page_idx: usize) -> Result { + /// Prepares a new `UntypedFrame` for the target index in pages, returns this new frame. + fn prepare_page(&self, page_idx: usize) -> Result { match &self.pager { None => Ok(FrameAllocOptions::new(1).alloc_single()?), Some(pager) => pager.commit_page(page_idx), } } - /// Prepares a new `Frame` for the target index in the VMO, returns this new frame. - fn prepare_overwrite(&self, page_idx: usize) -> Result { + /// Prepares a new `UntypedFrame` for the target index in the VMO, returns this new frame. + fn prepare_overwrite(&self, page_idx: usize) -> Result { if let Some(pager) = &self.pager { pager.commit_overwrite(page_idx) } else { @@ -220,9 +220,9 @@ impl Vmo_ { fn commit_with_cursor( &self, - cursor: &mut CursorMut<'_, Frame>, + cursor: &mut CursorMut<'_, UntypedFrame>, commit_flags: CommitFlags, - ) -> Result { + ) -> Result { let new_page = { if let Some(committed_page) = cursor.load() { // Fast path: return the page directly. @@ -241,7 +241,7 @@ impl Vmo_ { /// Commits the page corresponding to the target offset in the VMO and return that page. 
/// If the current offset has already been committed, the page will be returned directly. - pub fn commit_page(&self, offset: usize) -> Result { + pub fn commit_page(&self, offset: usize) -> Result { let page_idx = offset / PAGE_SIZE; self.pages.with(|pages, size| { if offset >= size { @@ -279,7 +279,7 @@ impl Vmo_ { commit_flags: CommitFlags, ) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { self.pages.with(|pages, size| { if range.end > size { @@ -315,7 +315,7 @@ impl Vmo_ { let read_range = offset..(offset + read_len); let mut read_offset = offset % PAGE_SIZE; - let read = move |commit_fn: &mut dyn FnMut() -> Result| { + let read = move |commit_fn: &mut dyn FnMut() -> Result| { let frame = commit_fn()?; frame.reader().skip(read_offset).read_fallible(writer)?; read_offset = 0; @@ -331,7 +331,7 @@ impl Vmo_ { let write_range = offset..(offset + write_len); let mut write_offset = offset % PAGE_SIZE; - let mut write = move |commit_fn: &mut dyn FnMut() -> Result| { + let mut write = move |commit_fn: &mut dyn FnMut() -> Result| { let frame = commit_fn()?; frame.writer().skip(write_offset).write_fallible(reader)?; write_offset = 0; @@ -401,7 +401,7 @@ impl Vmo_ { Ok(()) } - fn decommit_pages(&self, pages: &mut XArray, range: Range) -> Result<()> { + fn decommit_pages(&self, pages: &mut XArray, range: Range) -> Result<()> { let page_idx_range = get_page_idx_range(&range); let mut cursor = pages.cursor_mut(page_idx_range.start as u64); for page_idx in page_idx_range { @@ -426,7 +426,7 @@ impl Vmo_ { self.flags } - fn replace(&self, page: Frame, page_idx: usize) -> Result<()> { + fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> { self.pages.with(|pages, size| { if page_idx >= size / PAGE_SIZE { return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo"); diff --git a/kernel/src/vm/vmo/options.rs b/kernel/src/vm/vmo/options.rs index b669ba02a..db8e7ed52 100644 --- a/kernel/src/vm/vmo/options.rs +++ b/kernel/src/vm/vmo/options.rs @@ -8,7 +8,7 @@ use align_ext::AlignExt; use aster_rights::{Rights, TRightSet, TRights}; use ostd::{ collections::xarray::XArray, - mm::{Frame, FrameAllocOptions}, + mm::{FrameAllocOptions, UntypedFrame}, }; use super::{Pager, Pages, Vmo, VmoFlags}; @@ -137,7 +137,7 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option>) -> Re }) } -fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result> { +fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result> { if flags.contains(VmoFlags::CONTIGUOUS) { // if the vmo is continuous, we need to allocate frames for the vmo let frames_num = size / PAGE_SIZE; diff --git a/kernel/src/vm/vmo/pager.rs b/kernel/src/vm/vmo/pager.rs index dbe7622ad..2dc4555a6 100644 --- a/kernel/src/vm/vmo/pager.rs +++ b/kernel/src/vm/vmo/pager.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: MPL-2.0 -use ostd::mm::Frame; +use ostd::mm::UntypedFrame; use crate::prelude::*; @@ -26,7 +26,7 @@ pub trait Pager: Send + Sync { /// whatever frame that may or may not be the same as the last time. /// /// It is up to the pager to decide the range of valid indices. - fn commit_page(&self, idx: usize) -> Result; + fn commit_page(&self, idx: usize) -> Result; /// Notify the pager that the frame at a specified index has been updated. /// @@ -54,5 +54,5 @@ pub trait Pager: Send + Sync { /// Ask the pager to provide a frame at a specified index. 
/// Notify the pager that the frame will be fully overwritten soon, so pager can /// choose not to initialize it. - fn commit_overwrite(&self, idx: usize) -> Result; + fn commit_overwrite(&self, idx: usize) -> Result; } diff --git a/kernel/src/vm/vmo/static_cap.rs b/kernel/src/vm/vmo/static_cap.rs index 5f484239b..e0b246922 100644 --- a/kernel/src/vm/vmo/static_cap.rs +++ b/kernel/src/vm/vmo/static_cap.rs @@ -4,14 +4,14 @@ use core::ops::Range; use aster_rights::{Dup, Rights, TRightSet, TRights, Write}; use aster_rights_proc::require; -use ostd::mm::{Frame, VmIo}; +use ostd::mm::{UntypedFrame, VmIo}; use super::{CommitFlags, Vmo, VmoRightsOp}; use crate::prelude::*; impl Vmo> { /// Commits a page at specific offset. - pub fn commit_page(&self, offset: usize) -> Result { + pub fn commit_page(&self, offset: usize) -> Result { self.check_rights(Rights::WRITE)?; self.0.commit_page(offset) } @@ -41,7 +41,7 @@ impl Vmo> { #[require(R > Write)] pub(in crate::vm) fn operate_on_range(&self, range: &Range, operate: F) -> Result<()> where - F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, + F: FnMut(&mut dyn FnMut() -> Result) -> Result<()>, { self.0 .operate_on_range(range, operate, CommitFlags::empty()) @@ -114,7 +114,7 @@ impl Vmo> { /// /// The method requires the Write right. #[require(R > Write)] - pub fn replace(&self, page: Frame, page_idx: usize) -> Result<()> { + pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> { self.0.replace(page, page_idx) } diff --git a/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs b/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs index 1fc4bc79c..5939cedf8 100644 --- a/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs +++ b/osdk/tests/examples_in_book/write_a_kernel_in_100_lines_templates/lib.rs @@ -40,7 +40,7 @@ fn create_user_space(program: &[u8]) -> UserSpace { .alloc_contiguous() .unwrap(); // Physical memory pages can be only accessed - // via the `Frame` or `Segment` abstraction. + // via the `UntypedFrame` or `UntypedSegment` abstraction. segment.write_bytes(0, program).unwrap(); segment }; diff --git a/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs b/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs index 6e6d162f7..98a7ac24b 100644 --- a/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs +++ b/ostd/src/arch/x86/iommu/dma_remapping/context_table.rs @@ -15,7 +15,7 @@ use crate::{ dma::Daddr, page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags}, page_table::{PageTableError, PageTableItem}, - Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE, + FrameAllocOptions, Paddr, PageFlags, PageTable, UntypedFrame, VmIo, PAGE_SIZE, }, }; @@ -38,7 +38,7 @@ impl RootEntry { pub struct RootTable { /// Total 256 bus, each entry is 128 bits. - root_frame: Frame, + root_frame: UntypedFrame, // TODO: Use radix tree instead. context_tables: BTreeMap, } @@ -236,7 +236,7 @@ pub enum AddressWidth { pub struct ContextTable { /// Total 32 devices, each device has 8 functions. 
- entries_frame: Frame, + entries_frame: UntypedFrame, page_tables: BTreeMap>, } diff --git a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs index 54c715154..41b6efbc3 100644 --- a/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs +++ b/ostd/src/arch/x86/iommu/interrupt_remapping/table.rs @@ -9,7 +9,7 @@ use int_to_c_enum::TryFromInt; use super::IrtEntryHandle; use crate::{ - mm::{paddr_to_vaddr, FrameAllocOptions, Segment, PAGE_SIZE}, + mm::{paddr_to_vaddr, FrameAllocOptions, UntypedSegment, PAGE_SIZE}, sync::{LocalIrqDisabled, SpinLock}, }; @@ -23,7 +23,7 @@ enum ExtendedInterruptMode { pub struct IntRemappingTable { size: u16, extended_interrupt_mode: ExtendedInterruptMode, - frames: Segment, + frames: UntypedSegment, /// The global allocator for Interrupt remapping entry. allocator: SpinLock, handles: Vec>>, @@ -35,7 +35,7 @@ impl IntRemappingTable { Some(self.handles.get(id).unwrap().clone()) } - /// Creates an Interrupt Remapping Table with one Frame (default). + /// Creates an Interrupt Remapping Table with one UntypedFrame (default). pub(super) fn new() -> Self { const DEFAULT_PAGES: usize = 1; let segment = FrameAllocOptions::new(DEFAULT_PAGES) diff --git a/ostd/src/arch/x86/iommu/invalidate/queue.rs b/ostd/src/arch/x86/iommu/invalidate/queue.rs index f8da4a41c..4a4fc3818 100644 --- a/ostd/src/arch/x86/iommu/invalidate/queue.rs +++ b/ostd/src/arch/x86/iommu/invalidate/queue.rs @@ -3,12 +3,12 @@ use core::mem::size_of; use crate::{ - mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE}, + mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE}, prelude::Paddr, }; pub struct Queue { - segment: Segment, + segment: UntypedSegment, queue_size: usize, tail: usize, } diff --git a/ostd/src/boot/smp.rs b/ostd/src/boot/smp.rs index 01254eed8..543e8d371 100644 --- a/ostd/src/boot/smp.rs +++ b/ostd/src/boot/smp.rs @@ -11,10 +11,9 @@ use crate::{ arch::boot::smp::{bringup_all_aps, get_num_processors}, cpu, mm::{ + frame::{self, Segment}, kspace::KernelMeta, - paddr_to_vaddr, - page::{self, ContPages}, - PAGE_SIZE, + paddr_to_vaddr, PAGE_SIZE, }, task::Task, }; @@ -25,7 +24,7 @@ const AP_BOOT_STACK_SIZE: usize = PAGE_SIZE * 64; pub(crate) struct ApBootInfo { /// It holds the boot stack top pointers used by all APs. - pub(crate) boot_stack_array: ContPages, + pub(crate) boot_stack_array: Segment, /// `per_ap_info` maps each AP's ID to its associated boot information. per_ap_info: BTreeMap, } @@ -33,10 +32,10 @@ pub(crate) struct ApBootInfo { struct PerApInfo { is_started: AtomicBool, // TODO: When the AP starts up and begins executing tasks, the boot stack will - // no longer be used, and the `ContPages` can be deallocated (this problem also + // no longer be used, and the `Segment` can be deallocated (this problem also // exists in the boot processor, but the memory it occupies should be returned // to the frame allocator). - boot_stack_pages: ContPages, + boot_stack_pages: Segment, } static AP_LATE_ENTRY: Once = Once::new(); @@ -64,12 +63,12 @@ pub fn boot_all_aps() { let mut per_ap_info = BTreeMap::new(); // Use two pages to place stack pointers of all APs, thus support up to 1024 APs. 
let boot_stack_array = - page::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap(); + frame::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap(); assert!(num_cpus < 1024); for ap in 1..num_cpus { let boot_stack_pages = - page::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default()) + frame::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default()) .unwrap(); let boot_stack_ptr = paddr_to_vaddr(boot_stack_pages.end_paddr()); let stack_array_ptr = paddr_to_vaddr(boot_stack_array.start_paddr()) as *mut u64; diff --git a/ostd/src/cpu/local/mod.rs b/ostd/src/cpu/local/mod.rs index 0ad414819..ed05248d6 100644 --- a/ostd/src/cpu/local/mod.rs +++ b/ostd/src/cpu/local/mod.rs @@ -44,10 +44,9 @@ use spin::Once; use crate::{ arch, mm::{ + frame::{self, Segment}, kspace::KernelMeta, - paddr_to_vaddr, - page::{self, ContPages}, - PAGE_SIZE, + paddr_to_vaddr, PAGE_SIZE, }, }; @@ -79,7 +78,7 @@ pub(crate) unsafe fn early_init_bsp_local_base() { } /// The BSP initializes the CPU-local areas for APs. -static CPU_LOCAL_STORAGES: Once>> = Once::new(); +static CPU_LOCAL_STORAGES: Once>> = Once::new(); /// Initializes the CPU local data for the bootstrap processor (BSP). /// @@ -100,7 +99,7 @@ pub unsafe fn init_on_bsp() { for _ in 1..num_cpus { let ap_pages = { let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE); - page::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap() + frame::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap() }; let ap_pages_ptr = paddr_to_vaddr(ap_pages.start_paddr()) as *mut u8; diff --git a/ostd/src/lib.rs b/ostd/src/lib.rs index dab36c37b..15ac2b80b 100644 --- a/ostd/src/lib.rs +++ b/ostd/src/lib.rs @@ -84,7 +84,7 @@ unsafe fn init() { boot::init(); logger::init(); - mm::page::allocator::init(); + mm::frame::allocator::init(); mm::kspace::init_kernel_page_table(mm::init_page_meta()); mm::dma::init(); diff --git a/ostd/src/mm/dma/dma_coherent.rs b/ostd/src/mm/dma/dma_coherent.rs index 0c2dd4542..872b868f4 100644 --- a/ostd/src/mm/dma/dma_coherent.rs +++ b/ostd/src/mm/dma/dma_coherent.rs @@ -13,7 +13,7 @@ use crate::{ io::VmIoOnce, kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE}, page_prop::CachePolicy, - HasPaddr, Infallible, Paddr, PodOnce, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE, + HasPaddr, Infallible, Paddr, PodOnce, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE, }, prelude::*, }; @@ -38,7 +38,7 @@ pub struct DmaCoherent { #[derive(Debug)] struct DmaCoherentInner { - vm_segment: Segment, + vm_segment: UntypedSegment, start_daddr: Daddr, is_cache_coherent: bool, } @@ -54,7 +54,7 @@ impl DmaCoherent { /// The method fails if any part of the given `vm_segment` /// already belongs to a DMA mapping. 
pub fn map( - vm_segment: Segment, + vm_segment: UntypedSegment, is_cache_coherent: bool, ) -> core::result::Result { let frame_count = vm_segment.nbytes() / PAGE_SIZE; @@ -123,7 +123,7 @@ impl HasDaddr for DmaCoherent { } impl Deref for DmaCoherent { - type Target = Segment; + type Target = UntypedSegment; fn deref(&self) -> &Self::Target { &self.inner.vm_segment } diff --git a/ostd/src/mm/dma/dma_stream.rs b/ostd/src/mm/dma/dma_stream.rs index 464d64ce8..389f74dd4 100644 --- a/ostd/src/mm/dma/dma_stream.rs +++ b/ostd/src/mm/dma/dma_stream.rs @@ -11,7 +11,7 @@ use crate::{ error::Error, mm::{ dma::{dma_type, Daddr, DmaType}, - HasPaddr, Infallible, Paddr, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE, + HasPaddr, Infallible, Paddr, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE, }, }; @@ -34,7 +34,7 @@ pub struct DmaStream { #[derive(Debug)] struct DmaStreamInner { - vm_segment: Segment, + vm_segment: UntypedSegment, start_daddr: Daddr, /// TODO: remove this field when on x86. #[allow(unused)] @@ -55,11 +55,11 @@ pub enum DmaDirection { } impl DmaStream { - /// Establishes DMA stream mapping for a given [`Segment`]. + /// Establishes DMA stream mapping for a given [`UntypedSegment`]. /// /// The method fails if the segment already belongs to a DMA mapping. pub fn map( - vm_segment: Segment, + vm_segment: UntypedSegment, direction: DmaDirection, is_cache_coherent: bool, ) -> Result { @@ -107,13 +107,13 @@ impl DmaStream { }) } - /// Gets the underlying [`Segment`]. + /// Gets the underlying [`UntypedSegment`]. /// /// Usually, the CPU side should not access the memory /// after the DMA mapping is established because /// there is a chance that the device is updating /// the memory. Do this at your own risk. - pub fn vm_segment(&self) -> &Segment { + pub fn vm_segment(&self) -> &UntypedSegment { &self.inner.vm_segment } diff --git a/ostd/src/mm/page/allocator.rs b/ostd/src/mm/frame/allocator.rs similarity index 89% rename from ostd/src/mm/page/allocator.rs rename to ostd/src/mm/frame/allocator.rs index d563a1ba1..5913444fe 100644 --- a/ostd/src/mm/page/allocator.rs +++ b/ostd/src/mm/frame/allocator.rs @@ -10,7 +10,7 @@ use buddy_system_allocator::FrameAllocator; use log::info; use spin::Once; -use super::{cont_pages::ContPages, meta::PageMeta, Page}; +use super::{meta::FrameMeta, segment::Segment, Frame}; use crate::{ boot::memory_region::MemoryRegionType, mm::{Paddr, PAGE_SIZE}, @@ -62,7 +62,7 @@ pub(in crate::mm) static PAGE_ALLOCATOR: Once> /// Allocate a single page. /// /// The metadata of the page is initialized with the given metadata. -pub(crate) fn alloc_single(metadata: M) -> Option> { +pub(crate) fn alloc_single(metadata: M) -> Option> { PAGE_ALLOCATOR .get() .unwrap() @@ -71,7 +71,7 @@ pub(crate) fn alloc_single(metadata: M) -> Option> { .alloc(1) .map(|idx| { let paddr = idx * PAGE_SIZE; - Page::from_unused(paddr, metadata) + Frame::from_unused(paddr, metadata) }) } @@ -84,7 +84,7 @@ pub(crate) fn alloc_single(metadata: M) -> Option> { /// # Panics /// /// The function panics if the length is not base-page-aligned. 
-pub(crate) fn alloc_contiguous(len: usize, metadata_fn: F) -> Option> +pub(crate) fn alloc_contiguous(len: usize, metadata_fn: F) -> Option> where F: FnMut(Paddr) -> M, { @@ -95,9 +95,7 @@ where .disable_irq() .lock() .alloc(len / PAGE_SIZE) - .map(|start| { - ContPages::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn) - }) + .map(|start| Segment::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn)) } pub(crate) fn init() { diff --git a/ostd/src/mm/page/meta.rs b/ostd/src/mm/frame/meta.rs similarity index 84% rename from ostd/src/mm/page/meta.rs rename to ostd/src/mm/frame/meta.rs index 649a7b986..8992e61c1 100644 --- a/ostd/src/mm/page/meta.rs +++ b/ostd/src/mm/frame/meta.rs @@ -11,7 +11,7 @@ //! address. It is faster, simpler, safer and more versatile compared with an actual static array //! implementation. -pub mod mapping { +pub(crate) mod mapping { //! The metadata of each physical page is linear mapped to fixed virtual addresses //! in [`FRAME_METADATA_RANGE`]. @@ -21,14 +21,14 @@ pub mod mapping { use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE}; /// Converts a physical address of a base page to the virtual address of the metadata slot. - pub const fn page_to_meta(paddr: Paddr) -> Vaddr { + pub(crate) const fn page_to_meta(paddr: Paddr) -> Vaddr { let base = FRAME_METADATA_RANGE.start; let offset = paddr / PAGE_SIZE; base + offset * size_of::() } /// Converts a virtual address of the metadata slot to the physical address of the page. - pub const fn meta_to_page(vaddr: Vaddr) -> Paddr { + pub(crate) const fn meta_to_page(vaddr: Vaddr) -> Paddr { let base = FRAME_METADATA_RANGE.start; let offset = (vaddr - base) / size_of::(); offset * PAGE_SIZE @@ -46,7 +46,7 @@ use align_ext::AlignExt; use log::info; use static_assertions::const_assert_eq; -use super::{allocator, ContPages}; +use super::{allocator, Segment}; use crate::{ arch::mm::PagingConsts, mm::{ @@ -58,7 +58,7 @@ use crate::{ /// The maximum number of bytes of the metadata of a page. pub const PAGE_METADATA_MAX_SIZE: usize = - META_SLOT_SIZE - size_of::() - size_of::(); + META_SLOT_SIZE - size_of::() - size_of::(); /// The maximum alignment in bytes of the metadata of a page. pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::(); @@ -70,7 +70,7 @@ pub(in crate::mm) struct MetaSlot { /// /// It is placed at the beginning of a slot because: /// - the implementation can simply cast a `*const MetaSlot` - /// to a `*const PageMeta` for manipulation; + /// to a `*const FrameMeta` for manipulation; /// - if the metadata need special alignment, we can provide /// at most `PAGE_METADATA_ALIGN` bytes of alignment; /// - the subsequent fields can utilize the padding of the @@ -79,24 +79,24 @@ pub(in crate::mm) struct MetaSlot { /// The reference count of the page. /// /// Specifically, the reference count has the following meaning: - /// * `REF_COUNT_UNUSED`: The page is not in use. - /// * `0`: The page is being constructed ([`Page::from_unused`]) - /// or destructured ([`drop_last_in_place`]). - /// * `1..REF_COUNT_MAX`: The page is in use. - /// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to - /// prevent the reference count from overflowing. Otherwise, - /// overflowing the reference count will cause soundness issue. + /// * `REF_COUNT_UNUSED`: The page is not in use. + /// * `0`: The page is being constructed ([`Page::from_unused`]) + /// or destructured ([`drop_last_in_place`]). + /// * `1..REF_COUNT_MAX`: The page is in use. 
+ /// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to + /// prevent the reference count from overflowing. Otherwise, + /// overflowing the reference count will cause soundness issue. /// - /// [`Page::from_unused`]: super::Page::from_unused + /// [`Frame::from_unused`]: super::Frame::from_unused pub(super) ref_count: AtomicU32, /// The virtual table that indicates the type of the metadata. - pub(super) vtable_ptr: UnsafeCell>, + pub(super) vtable_ptr: UnsafeCell>, } pub(super) const REF_COUNT_UNUSED: u32 = u32::MAX; const REF_COUNT_MAX: u32 = i32::MAX as u32; -type PageMetaVtablePtr = core::ptr::DynMetadata; +type FrameMetaVtablePtr = core::ptr::DynMetadata; const_assert_eq!(PAGE_SIZE % META_SLOT_SIZE, 0); const_assert_eq!(size_of::(), META_SLOT_SIZE); @@ -113,29 +113,30 @@ const_assert_eq!(size_of::(), META_SLOT_SIZE); /// The implemented structure must have a size less than or equal to /// [`PAGE_METADATA_MAX_SIZE`] and an alignment less than or equal to /// [`PAGE_METADATA_MAX_ALIGN`]. -pub unsafe trait PageMeta: Any + Send + Sync + 'static { +pub unsafe trait FrameMeta: Any + Send + Sync + 'static { + /// Called when the last handle to the page is dropped. fn on_drop(&mut self, _paddr: Paddr) {} } /// Makes a structure usable as a page metadata. /// -/// Directly implementing [`PageMeta`] is not safe since the size and alignment +/// Directly implementing [`FrameMeta`] is not safe since the size and alignment /// must be checked. This macro provides a safe way to implement the trait with /// compile-time checks. #[macro_export] -macro_rules! impl_page_meta { +macro_rules! impl_frame_meta_for { ($($t:ty),*) => { $( use static_assertions::const_assert; - const_assert!(size_of::<$t>() <= $crate::mm::page::meta::PAGE_METADATA_MAX_SIZE); - const_assert!(align_of::<$t>() <= $crate::mm::page::meta::PAGE_METADATA_MAX_ALIGN); + const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE); + const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN); // SAFETY: The size and alignment of the structure are checked. - unsafe impl $crate::mm::page::meta::PageMeta for $t {} + unsafe impl $crate::mm::frame::meta::FrameMeta for $t {} )* }; } -pub use impl_page_meta; +pub use impl_frame_meta_for; impl MetaSlot { /// Increases the page reference count by one. @@ -178,7 +179,7 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) { // SAFETY: The page metadata is initialized and valid. let vtable_ptr = unsafe { vtable_ptr.assume_init_read() }; - let meta_ptr: *mut dyn PageMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr); + let meta_ptr: *mut dyn FrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr); // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive @@ -209,12 +210,12 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) { #[derive(Debug, Default)] pub struct MetaPageMeta {} -impl_page_meta!(MetaPageMeta); +impl_frame_meta_for!(MetaPageMeta); /// Initializes the metadata of all physical pages. /// -/// The function returns a list of `Page`s containing the metadata. -pub(crate) fn init() -> ContPages { +/// The function returns a list of `Frame`s containing the metadata. 
+pub(crate) fn init() -> Segment { let max_paddr = { let regions = crate::boot::memory_regions(); regions.iter().map(|r| r.base() + r.len()).max().unwrap() @@ -249,7 +250,7 @@ pub(crate) fn init() -> ContPages { .unwrap(); // Now the metadata pages are mapped, we can initialize the metadata. - ContPages::from_unused(meta_pages..meta_pages + num_meta_pages * PAGE_SIZE, |_| { + Segment::from_unused(meta_pages..meta_pages + num_meta_pages * PAGE_SIZE, |_| { MetaPageMeta {} }) } diff --git a/ostd/src/mm/frame/mod.rs b/ostd/src/mm/frame/mod.rs index 7d69dbe6f..1bd329c86 100644 --- a/ostd/src/mm/frame/mod.rs +++ b/ostd/src/mm/frame/mod.rs @@ -1,91 +1,177 @@ // SPDX-License-Identifier: MPL-2.0 -//! Untyped physical memory management. +//! Physical memory page management. //! -//! A frame is a special page that is _untyped_ memory. -//! It is used to store data irrelevant to the integrity of the kernel. -//! All pages mapped to the virtual address space of the users are backed by -//! frames. Frames, with all the properties of pages, can additionally be safely -//! read and written by the kernel or the user. +//! A page is an aligned, contiguous range of bytes in physical memory. The sizes +//! of base pages and huge pages are architecture-dependent. A page can be mapped +//! to a virtual address using the page table. +//! +//! Pages can be accessed through page handles, namely, [`Frame`]. A page handle +//! is a reference-counted handle to a page. When all handles to a page are dropped, +//! the page is released and can be reused. +//! +//! Pages can have dedicated metadata, which is implemented in the [`meta`] module. +//! The reference count and usage of a page are stored in the metadata as well, leaving +//! the handle only a pointer to the metadata. -pub mod options; +pub mod allocator; +pub mod meta; mod segment; +pub mod untyped; -use core::mem::ManuallyDrop; +use core::{ + any::Any, + marker::PhantomData, + mem::ManuallyDrop, + sync::atomic::{AtomicUsize, Ordering}, +}; +use meta::{ + mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED, +}; pub use segment::Segment; +use untyped::UntypedMeta; -use super::{ - page::{ - meta::{impl_page_meta, MetaSlot}, - DynPage, Page, - }, - Infallible, -}; -use crate::{ - mm::{ - io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter}, - paddr_to_vaddr, HasPaddr, Paddr, PAGE_SIZE, - }, - Error, Result, -}; +use super::{PagingLevel, UntypedFrame, PAGE_SIZE}; +use crate::mm::{Paddr, PagingConsts, Vaddr}; -/// A handle to a physical memory page of untyped memory. -/// -/// An instance of `Frame` is a handle to a page frame (a physical memory -/// page). A cloned `Frame` refers to the same page frame as the original. -/// As the original and cloned instances point to the same physical address, -/// they are treated as equal to each other. Behind the scene, a reference -/// counter is maintained for each page frame so that when all instances of -/// `Frame` that refer to the same page frame are dropped, the page frame -/// will be globally freed. -#[derive(Debug, Clone)] -pub struct Frame { - page: Page, +static MAX_PADDR: AtomicUsize = AtomicUsize::new(0); + +/// A page with a statically-known usage, whose metadata is represented by `M`. +#[derive(Debug)] +pub struct Frame { + pub(super) ptr: *const MetaSlot, + pub(super) _marker: PhantomData, } -impl Frame { - /// Returns the physical address of the page frame. 
- pub fn start_paddr(&self) -> Paddr { - self.page.paddr() +unsafe impl Send for Frame {} + +unsafe impl Sync for Frame {} + +impl Frame { + /// Get a `Frame` handle with a specific usage from a raw, unused page. + /// + /// The caller should provide the initial metadata of the page. + /// + /// # Panics + /// + /// The function panics if: + /// - the physical address is out of bound or not aligned; + /// - the page is already in use. + pub fn from_unused(paddr: Paddr, metadata: M) -> Self { + assert!(paddr % PAGE_SIZE == 0); + assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr); + + // Checking unsafe preconditions of the `FrameMeta` trait. + debug_assert!(size_of::() <= PAGE_METADATA_MAX_SIZE); + debug_assert!(align_of::() <= PAGE_METADATA_MAX_ALIGN); + + let vaddr = mapping::page_to_meta::(paddr); + let ptr = vaddr as *const MetaSlot; + + // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an + // immutable reference to it is always safe. + let slot = unsafe { &*ptr }; + + // `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata + // initialization won't be reordered before this memory compare-and-exchange. + slot.ref_count + .compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed) + .expect("Frame already in use when trying to get a new handle"); + + // SAFETY: We have exclusive access to the page metadata. + let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() }; + vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta)); + + // SAFETY: + // 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the + // metadata storage. + // 2. The size and the alignment of the metadata storage is large enough to hold `M` + // (guaranteed by the safety requirement of the `FrameMeta` trait). + // 3. We have exclusive access to the metadata storage (guaranteed by the reference count). + unsafe { ptr.cast::().cast_mut().write(metadata) }; + + // Assuming no one can create a `Frame` instance directly from the page address, `Relaxed` + // is fine here. Otherwise, we should use `Release` to ensure that the metadata + // initialization won't be reordered after this memory store. + slot.ref_count.store(1, Ordering::Relaxed); + + Self { + ptr, + _marker: PhantomData, + } } - /// Returns the end physical address of the page frame. - pub fn end_paddr(&self) -> Paddr { - self.start_paddr() + PAGE_SIZE + /// Forget the handle to the page. + /// + /// This will result in the page being leaked without calling the custom dropper. + /// + /// A physical address to the page is returned in case the page needs to be + /// restored using [`Frame::from_raw`] later. This is useful when some architectural + /// data structures need to hold the page handle such as the page table. + #[allow(unused)] + pub(in crate::mm) fn into_raw(self) -> Paddr { + let paddr = self.paddr(); + core::mem::forget(self); + paddr } - /// Returns the size of the frame + /// Restore a forgotten `Frame` from a physical address. + /// + /// # Safety + /// + /// The caller should only restore a `Frame` that was previously forgotten using + /// [`Frame::into_raw`]. + /// + /// And the restoring operation should only be done once for a forgotten + /// `Frame`. Otherwise double-free will happen. + /// + /// Also, the caller ensures that the usage of the page is correct. There's + /// no checking of the usage in this function. 
+ pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self { + let vaddr = mapping::page_to_meta::(paddr); + let ptr = vaddr as *const MetaSlot; + + Self { + ptr, + _marker: PhantomData, + } + } + + /// Get the physical address. + pub fn paddr(&self) -> Paddr { + mapping::meta_to_page::(self.ptr as Vaddr) + } + + /// Get the paging level of this page. + /// + /// This is the level of the page table entry that maps the frame, + /// which determines the size of the frame. + /// + /// Currently, the level is always 1, which means the frame is a regular + /// page frame. + pub const fn level(&self) -> PagingLevel { + 1 + } + + /// Size of this page in bytes. pub const fn size(&self) -> usize { - self.page.size() + PAGE_SIZE } - /// Returns a raw pointer to the starting virtual address of the frame. - pub fn as_ptr(&self) -> *const u8 { - paddr_to_vaddr(self.start_paddr()) as *const u8 + /// Get the metadata of this page. + pub fn meta(&self) -> &M { + // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably + // borrowed as `M` because the type is correct, it lives under the given lifetime, and no + // one will mutably borrow the page metadata after initialization. + unsafe { &*self.ptr.cast() } } - /// Returns a mutable raw pointer to the starting virtual address of the frame. - pub fn as_mut_ptr(&self) -> *mut u8 { - paddr_to_vaddr(self.start_paddr()) as *mut u8 - } - - /// Copies the content of `src` to the frame. - pub fn copy_from(&self, src: &Frame) { - if self.paddr() == src.paddr() { - return; - } - // SAFETY: the source and the destination does not overlap. - unsafe { - core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size()); - } - } - - /// Get the reference count of the frame. + /// Get the reference count of the page. /// /// It returns the number of all references to the page, including all the - /// existing page handles ([`Frame`]) and all the mappings in the page - /// table that points to the page. + /// existing page handles ([`Frame`], [`AnyFrame`]), and all the mappings in the + /// page table that points to the page. /// /// # Safety /// @@ -93,147 +179,203 @@ impl Frame { /// reference count can be changed by other threads at any time including /// potentially between calling this method and acting on the result. pub fn reference_count(&self) -> u32 { - self.page.reference_count() + self.slot().ref_count.load(Ordering::Relaxed) + } + + fn slot(&self) -> &MetaSlot { + // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an + // immutable reference to it is always safe. + unsafe { &*self.ptr } } } -impl From> for Frame { - fn from(page: Page) -> Self { - Self { page } - } -} +impl Clone for Frame { + fn clone(&self) -> Self { + // SAFETY: We have already held a reference to the page. + unsafe { self.slot().inc_ref_count() }; -impl TryFrom for Frame { - type Error = DynPage; - - /// Try converting a [`DynPage`] into the statically-typed [`Frame`]. - /// - /// If the dynamic page is not used as an untyped page frame, it will - /// return the dynamic page itself as is. - fn try_from(page: DynPage) -> core::result::Result { - page.try_into().map(|p: Page| p.into()) - } -} - -impl From for Page { - fn from(frame: Frame) -> Self { - frame.page - } -} - -impl HasPaddr for Frame { - fn paddr(&self) -> Paddr { - self.start_paddr() - } -} - -impl<'a> Frame { - /// Returns a reader to read data from it. 
- pub fn reader(&'a self) -> VmReader<'a, Infallible> { - // SAFETY: - // - The memory range points to untyped memory. - // - The frame is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the frame. - unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) } - } - - /// Returns a writer to write data into it. - pub fn writer(&'a self) -> VmWriter<'a, Infallible> { - // SAFETY: - // - The memory range points to untyped memory. - // - The frame is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the frame. - unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) } - } -} - -impl VmIo for Frame { - fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { - let read_len = writer.avail().min(self.size().saturating_sub(offset)); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; - if max_offset > self.size() { - return Err(Error::InvalidArgs); - } - let len = self - .reader() - .skip(offset) - .read_fallible(writer) - .map_err(|(e, _)| e)?; - debug_assert!(len == read_len); - Ok(()) - } - - fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { - let write_len = reader.remain().min(self.size().saturating_sub(offset)); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?; - if max_offset > self.size() { - return Err(Error::InvalidArgs); - } - let len = self - .writer() - .skip(offset) - .write_fallible(reader) - .map_err(|(e, _)| e)?; - debug_assert!(len == write_len); - Ok(()) - } -} - -/// Metadata for a frame. -#[derive(Debug, Default)] -pub struct FrameMeta {} - -impl_page_meta!(FrameMeta); - -// Here are implementations for `xarray`. - -use core::{marker::PhantomData, ops::Deref}; - -/// `FrameRef` is a struct that can work as `&'a Frame`. -/// -/// This is solely useful for [`crate::collections::xarray`]. -pub struct FrameRef<'a> { - inner: ManuallyDrop, - _marker: PhantomData<&'a Frame>, -} - -impl Deref for FrameRef<'_> { - type Target = Frame; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -// SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer. -// The pointer is also aligned to 4. -unsafe impl xarray::ItemEntry for Frame { - type Ref<'a> - = FrameRef<'a> - where - Self: 'a; - - fn into_raw(self) -> *const () { - let ptr = self.page.ptr; - core::mem::forget(self); - ptr as *const () - } - - unsafe fn from_raw(raw: *const ()) -> Self { Self { - page: Page:: { - ptr: raw as *mut MetaSlot, - _marker: PhantomData, - }, - } - } - - unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> { - Self::Ref { - inner: ManuallyDrop::new(Frame::from_raw(raw)), + ptr: self.ptr, _marker: PhantomData, } } } + +impl Drop for Frame { + fn drop(&mut self) { + let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release); + debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED); + + if last_ref_cnt == 1 { + // A fence is needed here with the same reasons stated in the implementation of + // `Arc::drop`: . + core::sync::atomic::fence(Ordering::Acquire); + + // SAFETY: this is the last reference and is about to be dropped. + unsafe { + meta::drop_last_in_place(self.ptr as *mut MetaSlot); + } + } + } +} + +/// A page with a dynamically-known usage. 
+/// +/// It can also be used when the user don't care about the usage of the page. +#[derive(Debug)] +pub struct AnyFrame { + ptr: *const MetaSlot, +} + +unsafe impl Send for AnyFrame {} +unsafe impl Sync for AnyFrame {} + +impl AnyFrame { + /// Forget the handle to the page. + /// + /// This is the same as [`Frame::into_raw`]. + /// + /// This will result in the page being leaked without calling the custom dropper. + /// + /// A physical address to the page is returned in case the page needs to be + /// restored using [`Self::from_raw`] later. + pub(in crate::mm) fn into_raw(self) -> Paddr { + let paddr = self.paddr(); + core::mem::forget(self); + paddr + } + + /// Restore a forgotten page from a physical address. + /// + /// # Safety + /// + /// The safety concerns are the same as [`Frame::from_raw`]. + pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self { + let vaddr = mapping::page_to_meta::(paddr); + let ptr = vaddr as *const MetaSlot; + + Self { ptr } + } + + /// Get the metadata of this page. + pub fn meta(&self) -> &dyn Any { + let slot = self.slot(); + + // SAFETY: The page metadata is valid to be borrowed immutably, since it will never be + // borrowed mutably after initialization. + let vtable_ptr = unsafe { &*slot.vtable_ptr.get() }; + + // SAFETY: The page metadata is initialized and valid. + let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() }; + + let meta_ptr: *const dyn FrameMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr); + + // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably + // borrowed under `vtable_ptr` because the vtable is correct, it lives under the given + // lifetime, and no one will mutably borrow the page metadata after initialization. + (unsafe { &*meta_ptr }) as &dyn Any + } + + /// Get the physical address of the start of the page + pub fn paddr(&self) -> Paddr { + mapping::meta_to_page::(self.ptr as Vaddr) + } + + /// Get the paging level of this page. + pub fn level(&self) -> PagingLevel { + 1 + } + + /// Size of this page in bytes. + pub fn size(&self) -> usize { + PAGE_SIZE + } + + fn slot(&self) -> &MetaSlot { + // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an + // immutable reference to it is always safe. + unsafe { &*self.ptr } + } +} + +impl TryFrom for Frame { + type Error = AnyFrame; + + /// Try converting a [`AnyFrame`] into the statically-typed [`Frame`]. + /// + /// If the usage of the page is not the same as the expected usage, it will + /// return the dynamic page itself as is. + fn try_from(dyn_page: AnyFrame) -> Result { + if dyn_page.meta().is::() { + let result = Frame { + ptr: dyn_page.ptr, + _marker: PhantomData, + }; + let _ = ManuallyDrop::new(dyn_page); + Ok(result) + } else { + Err(dyn_page) + } + } +} + +impl From> for AnyFrame { + fn from(page: Frame) -> Self { + let result = Self { ptr: page.ptr }; + let _ = ManuallyDrop::new(page); + result + } +} + +impl From for AnyFrame { + fn from(frame: UntypedFrame) -> Self { + Frame::::from(frame).into() + } +} + +impl Clone for AnyFrame { + fn clone(&self) -> Self { + // SAFETY: We have already held a reference to the page. 
+ unsafe { self.slot().inc_ref_count() }; + + Self { ptr: self.ptr } + } +} + +impl Drop for AnyFrame { + fn drop(&mut self) { + let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release); + debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED); + + if last_ref_cnt == 1 { + // A fence is needed here with the same reasons stated in the implementation of + // `Arc::drop`: . + core::sync::atomic::fence(Ordering::Acquire); + + // SAFETY: this is the last reference and is about to be dropped. + unsafe { + meta::drop_last_in_place(self.ptr as *mut MetaSlot); + } + } + } +} + +/// Increases the reference count of the page by one. +/// +/// # Safety +/// +/// The caller should ensure the following conditions: +/// 1. The physical address must represent a valid page; +/// 2. The caller must have already held a reference to the page. +pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) { + debug_assert!(paddr % PAGE_SIZE == 0); + debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr); + + let vaddr: Vaddr = mapping::page_to_meta::(paddr); + // SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking + // an immutable reference to it is always safe. + let slot = unsafe { &*(vaddr as *const MetaSlot) }; + + // SAFETY: We have already held a reference to the page. + unsafe { slot.inc_ref_count() }; +} diff --git a/ostd/src/mm/frame/segment.rs b/ostd/src/mm/frame/segment.rs index 6f28b7292..260272e3c 100644 --- a/ostd/src/mm/frame/segment.rs +++ b/ostd/src/mm/frame/segment.rs @@ -1,178 +1,191 @@ // SPDX-License-Identifier: MPL-2.0 -//! A contiguous segment of untyped memory pages. +//! A contiguous range of pages. -use core::ops::Range; +use alloc::vec::Vec; +use core::{mem::ManuallyDrop, ops::Range}; -use crate::{ - mm::{ - frame::FrameMeta, - io::{FallibleVmRead, FallibleVmWrite}, - page::ContPages, - Frame, HasPaddr, Infallible, Paddr, VmIo, VmReader, VmWriter, - }, - Error, Result, -}; +use super::{inc_page_ref_count, meta::FrameMeta, Frame}; +use crate::mm::{Paddr, PAGE_SIZE}; -/// A contiguous segment of untyped memory pages. +/// A contiguous range of physical memory pages. /// -/// A [`Segment`] object is a handle to a contiguous range of untyped memory -/// pages, and the underlying pages can be shared among multiple threads. -/// [`Segment::slice`] can be used to clone a slice of the segment (also can be -/// used to clone the entire range). Reference counts are maintained for each -/// page in the segment. So cloning the handle may not be cheap as it -/// increments the reference count of all the cloned pages. +/// This is a handle to many contiguous pages. It will be more lightweight +/// than owning an array of page handles. /// -/// Other [`Frame`] handles can also refer to the pages in the segment. And -/// the segment can be iterated over to get all the frames in it. -/// -/// To allocate a segment, use [`crate::mm::FrameAllocator`]. -/// -/// # Example -/// -/// ```rust -/// let vm_segment = FrameAllocOptions::new(2) -/// .is_contiguous(true) -/// .alloc_contiguous()?; -/// vm_segment.write_bytes(0, buf)?; -/// ``` +/// The ownership is achieved by the reference counting mechanism of pages. +/// When constructing a `Segment`, the page handles are created then +/// forgotten, leaving the reference count. When dropping a it, the page +/// handles are restored and dropped, decrementing the reference count. 
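The ownership model described above (one forgotten handle per page, re-counted on `Clone` and released on `Drop`) can be sketched in isolation as follows. `Segment`, `REF_COUNTS`, `slot`, and the fixed page count are hypothetical stand-ins; the real type keeps its counts in the per-page metadata slots.

```rust
use std::ops::Range;
use std::sync::atomic::{AtomicU32, Ordering};

const PAGE_SIZE: usize = 4096;

// Hypothetical per-page reference counts standing in for the page metadata slots.
static REF_COUNTS: [AtomicU32; 8] = [
    AtomicU32::new(0), AtomicU32::new(0), AtomicU32::new(0), AtomicU32::new(0),
    AtomicU32::new(0), AtomicU32::new(0), AtomicU32::new(0), AtomicU32::new(0),
];

fn slot(paddr: usize) -> &'static AtomicU32 {
    &REF_COUNTS[paddr / PAGE_SIZE]
}

// A simplified model of the contiguous-range handle: it owns exactly one
// reference count for every page in `range`.
struct Segment {
    range: Range<usize>, // page-aligned physical addresses
}

impl Segment {
    fn from_unused(range: Range<usize>) -> Self {
        for pa in range.clone().step_by(PAGE_SIZE) {
            // Take a reference for the page and "forget" the per-page handle:
            // only the count is left behind.
            slot(pa).fetch_add(1, Ordering::Relaxed);
        }
        Self { range }
    }
}

impl Clone for Segment {
    fn clone(&self) -> Self {
        for pa in self.range.clone().step_by(PAGE_SIZE) {
            slot(pa).fetch_add(1, Ordering::Relaxed);
        }
        Self { range: self.range.clone() }
    }
}

impl Drop for Segment {
    fn drop(&mut self) {
        for pa in self.range.clone().step_by(PAGE_SIZE) {
            // Restore and drop the per-page handle: the count goes back down.
            slot(pa).fetch_sub(1, Ordering::Release);
        }
    }
}

fn main() {
    let seg = Segment::from_unused(0..4 * PAGE_SIZE);
    let copy = seg.clone();
    assert_eq!(slot(0).load(Ordering::Relaxed), 2);
    drop(seg);
    drop(copy);
    assert_eq!(slot(0).load(Ordering::Relaxed), 0);
}
```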
#[derive(Debug)] -pub struct Segment { - pages: ContPages, +pub struct Segment { + range: Range, + _marker: core::marker::PhantomData, } -impl HasPaddr for Segment { - fn paddr(&self) -> Paddr { - self.pages.start_paddr() - } -} - -impl Clone for Segment { - fn clone(&self) -> Self { - Self { - pages: self.pages.clone(), +impl Drop for Segment { + fn drop(&mut self) { + for paddr in self.range.clone().step_by(PAGE_SIZE) { + // SAFETY: for each page there would be a forgotten handle + // when creating the `Segment` object. + drop(unsafe { Frame::::from_raw(paddr) }); } } } -impl Segment { - /// Returns the start physical address. - pub fn start_paddr(&self) -> Paddr { - self.pages.start_paddr() +impl Clone for Segment { + fn clone(&self) -> Self { + for paddr in self.range.clone().step_by(PAGE_SIZE) { + // SAFETY: for each page there would be a forgotten handle + // when creating the `Segment` object, so we already have + // reference counts for the pages. + unsafe { inc_page_ref_count(paddr) }; + } + Self { + range: self.range.clone(), + _marker: core::marker::PhantomData, + } } +} - /// Returns the end physical address. - pub fn end_paddr(&self) -> Paddr { - self.pages.end_paddr() - } - - /// Returns the number of bytes in it. - pub fn nbytes(&self) -> usize { - self.pages.nbytes() - } - - /// Split the segment into two at the given byte offset from the start. +impl Segment { + /// Creates a new `Segment` from unused pages. /// - /// The resulting segments cannot be empty. So the byte offset cannot be - /// neither zero nor the length of the segment. + /// The caller must provide a closure to initialize metadata for all the pages. + /// The closure receives the physical address of the page and returns the + /// metadata, which is similar to [`core::array::from_fn`]. /// /// # Panics /// - /// The function panics if the byte offset is out of bounds, at either ends, or + /// The function panics if: + /// - the physical address is invalid or not aligned; + /// - any of the pages are already in use. + pub fn from_unused(range: Range, mut metadata_fn: F) -> Self + where + F: FnMut(Paddr) -> M, + { + for paddr in range.clone().step_by(PAGE_SIZE) { + let _ = ManuallyDrop::new(Frame::::from_unused(paddr, metadata_fn(paddr))); + } + Self { + range, + _marker: core::marker::PhantomData, + } + } + + /// Gets the start physical address of the contiguous pages. + pub fn start_paddr(&self) -> Paddr { + self.range.start + } + + /// Gets the end physical address of the contiguous pages. + pub fn end_paddr(&self) -> Paddr { + self.range.end + } + + /// Gets the length in bytes of the contiguous pages. + pub fn nbytes(&self) -> usize { + self.range.end - self.range.start + } + + /// Splits the pages into two at the given byte offset from the start. + /// + /// The resulting pages cannot be empty. So the offset cannot be neither + /// zero nor the length of the pages. + /// + /// # Panics + /// + /// The function panics if the offset is out of bounds, at either ends, or /// not base-page-aligned. 
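A short standalone sketch of the range arithmetic behind this split, under the same panic rules documented above: the offset must be base-page-aligned and strictly inside the range so that neither half is empty. `split_range` and the constant are illustrative, not ostd APIs.

```rust
use std::ops::Range;

const PAGE_SIZE: usize = 4096;

fn split_range(range: Range<usize>, offset: usize) -> (Range<usize>, Range<usize>) {
    let len = range.end - range.start;
    assert!(offset % PAGE_SIZE == 0, "offset must be base-page-aligned");
    assert!(0 < offset && offset < len, "both halves must be non-empty");
    let at = range.start + offset;
    (range.start..at, at..range.end)
}

fn main() {
    // Four pages starting at 0x1_0000, split one page in.
    let (left, right) = split_range(0x1_0000..0x1_4000, PAGE_SIZE);
    assert_eq!(left, 0x1_0000..0x1_1000);
    assert_eq!(right, 0x1_1000..0x1_4000);
}
```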
pub fn split(self, offset: usize) -> (Self, Self) { - let (left, right) = self.pages.split(offset); - (Self { pages: left }, Self { pages: right }) + assert!(offset % PAGE_SIZE == 0); + assert!(0 < offset && offset < self.nbytes()); + + let old = ManuallyDrop::new(self); + let at = old.range.start + offset; + + ( + Self { + range: old.range.start..at, + _marker: core::marker::PhantomData, + }, + Self { + range: at..old.range.end, + _marker: core::marker::PhantomData, + }, + ) } - /// Get an extra handle to the segment in the byte range. + /// Gets an extra handle to the pages in the byte offset range. /// - /// The sliced byte range in indexed by the offset from the start of the - /// segment. The resulting segment holds extra reference counts. + /// The sliced byte offset range in indexed by the offset from the start of + /// the contiguous pages. The resulting pages holds extra reference counts. /// /// # Panics /// - /// The function panics if the byte range is out of bounds, or if any of - /// the ends of the byte range is not base-page aligned. + /// The function panics if the byte offset range is out of bounds, or if + /// any of the ends of the byte offset range is not base-page aligned. pub fn slice(&self, range: &Range) -> Self { + assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0); + let start = self.range.start + range.start; + let end = self.range.start + range.end; + assert!(start <= end && end <= self.range.end); + + for paddr in (start..end).step_by(PAGE_SIZE) { + // SAFETY: We already have reference counts for the pages since + // for each page there would be a forgotten handle when creating + // the `Segment` object. + unsafe { inc_page_ref_count(paddr) }; + } + Self { - pages: self.pages.slice(range), + range: start..end, + _marker: core::marker::PhantomData, } } - - /// Gets a [`VmReader`] to read from the segment from the beginning to the end. - pub fn reader(&self) -> VmReader<'_, Infallible> { - let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8; - // SAFETY: - // - The memory range points to untyped memory. - // - The segment is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the segment. - unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) } - } - - /// Gets a [`VmWriter`] to write to the segment from the beginning to the end. - pub fn writer(&self) -> VmWriter<'_, Infallible> { - let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8; - // SAFETY: - // - The memory range points to untyped memory. - // - The segment is alive during the lifetime `'a`. - // - Using `VmReader` and `VmWriter` is the only way to access the segment. - unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) } - } } -impl From for Segment { - fn from(frame: Frame) -> Self { +impl From> for Segment { + fn from(page: Frame) -> Self { + let pa = page.paddr(); + let _ = ManuallyDrop::new(page); Self { - pages: ContPages::from(frame.page), + range: pa..pa + PAGE_SIZE, + _marker: core::marker::PhantomData, } } } -impl From> for Segment { - fn from(pages: ContPages) -> Self { - Self { pages } +impl From> for Vec> { + fn from(pages: Segment) -> Self { + let vector = pages + .range + .clone() + .step_by(PAGE_SIZE) + .map(|i| + // SAFETY: for each page there would be a forgotten handle + // when creating the `Segment` object. 
+ unsafe { Frame::::from_raw(i) }) + .collect(); + let _ = ManuallyDrop::new(pages); + vector } } -impl VmIo for Segment { - fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { - let read_len = writer.avail(); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; - if max_offset > self.nbytes() { - return Err(Error::InvalidArgs); - } - let len = self - .reader() - .skip(offset) - .read_fallible(writer) - .map_err(|(e, _)| e)?; - debug_assert!(len == read_len); - Ok(()) - } - - fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { - let write_len = reader.remain(); - // Do bound check with potential integer overflow in mind - let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?; - if max_offset > self.nbytes() { - return Err(Error::InvalidArgs); - } - let len = self - .writer() - .skip(offset) - .write_fallible(reader) - .map_err(|(e, _)| e)?; - debug_assert!(len == write_len); - Ok(()) - } -} - -impl Iterator for Segment { - type Item = Frame; +impl Iterator for Segment { + type Item = Frame; fn next(&mut self) -> Option { - self.pages.next().map(|page| Frame { page }) + if self.range.start < self.range.end { + // SAFETY: each page in the range would be a handle forgotten + // when creating the `Segment` object. + let page = unsafe { Frame::::from_raw(self.range.start) }; + self.range.start += PAGE_SIZE; + // The end cannot be non-page-aligned. + debug_assert!(self.range.start <= self.range.end); + Some(page) + } else { + None + } } } diff --git a/ostd/src/mm/frame/untyped/mod.rs b/ostd/src/mm/frame/untyped/mod.rs new file mode 100644 index 000000000..a929250d3 --- /dev/null +++ b/ostd/src/mm/frame/untyped/mod.rs @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Untyped physical memory management. +//! +//! A frame is a special page that is _untyped_ memory. +//! It is used to store data irrelevant to the integrity of the kernel. +//! All pages mapped to the virtual address space of the users are backed by +//! frames. Frames, with all the properties of pages, can additionally be safely +//! read and written by the kernel or the user. + +pub mod options; +mod segment; + +use core::mem::ManuallyDrop; + +pub use segment::UntypedSegment; + +use super::{ + meta::{impl_frame_meta_for, MetaSlot}, + AnyFrame, Frame, +}; +use crate::{ + mm::{ + io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter}, + paddr_to_vaddr, HasPaddr, Infallible, Paddr, PAGE_SIZE, + }, + Error, Result, +}; + +/// A handle to a physical memory page of untyped memory. +/// +/// An instance of `UntypedFrame` is a handle to a page frame (a physical memory +/// page). A cloned `UntypedFrame` refers to the same page frame as the original. +/// As the original and cloned instances point to the same physical address, +/// they are treated as equal to each other. Behind the scene, a reference +/// counter is maintained for each page frame so that when all instances of +/// `UntypedFrame` that refer to the same page frame are dropped, the page frame +/// will be globally freed. +#[derive(Debug, Clone)] +pub struct UntypedFrame { + page: Frame, +} + +impl UntypedFrame { + /// Returns the physical address of the page frame. + pub fn start_paddr(&self) -> Paddr { + self.page.paddr() + } + + /// Returns the end physical address of the page frame. 
+ pub fn end_paddr(&self) -> Paddr { + self.start_paddr() + PAGE_SIZE + } + + /// Returns the size of the frame + pub const fn size(&self) -> usize { + self.page.size() + } + + /// Returns a raw pointer to the starting virtual address of the frame. + pub fn as_ptr(&self) -> *const u8 { + paddr_to_vaddr(self.start_paddr()) as *const u8 + } + + /// Returns a mutable raw pointer to the starting virtual address of the frame. + pub fn as_mut_ptr(&self) -> *mut u8 { + paddr_to_vaddr(self.start_paddr()) as *mut u8 + } + + /// Copies the content of `src` to the frame. + pub fn copy_from(&self, src: &UntypedFrame) { + if self.paddr() == src.paddr() { + return; + } + // SAFETY: the source and the destination does not overlap. + unsafe { + core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size()); + } + } + + /// Get the reference count of the frame. + /// + /// It returns the number of all references to the page, including all the + /// existing page handles ([`UntypedFrame`]) and all the mappings in the page + /// table that points to the page. + /// + /// # Safety + /// + /// The function is safe to call, but using it requires extra care. The + /// reference count can be changed by other threads at any time including + /// potentially between calling this method and acting on the result. + pub fn reference_count(&self) -> u32 { + self.page.reference_count() + } +} + +impl From> for UntypedFrame { + fn from(page: Frame) -> Self { + Self { page } + } +} + +impl TryFrom for UntypedFrame { + type Error = AnyFrame; + + /// Try converting a [`AnyFrame`] into the statically-typed [`UntypedFrame`]. + /// + /// If the dynamic page is not used as an untyped page frame, it will + /// return the dynamic page itself as is. + fn try_from(page: AnyFrame) -> core::result::Result { + page.try_into().map(|p: Frame| p.into()) + } +} + +impl From for Frame { + fn from(frame: UntypedFrame) -> Self { + frame.page + } +} + +impl HasPaddr for UntypedFrame { + fn paddr(&self) -> Paddr { + self.start_paddr() + } +} + +impl<'a> UntypedFrame { + /// Returns a reader to read data from it. + pub fn reader(&'a self) -> VmReader<'a, Infallible> { + // SAFETY: + // - The memory range points to untyped memory. + // - The frame is alive during the lifetime `'a`. + // - Using `VmReader` and `VmWriter` is the only way to access the frame. + unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) } + } + + /// Returns a writer to write data into it. + pub fn writer(&'a self) -> VmWriter<'a, Infallible> { + // SAFETY: + // - The memory range points to untyped memory. + // - The frame is alive during the lifetime `'a`. + // - Using `VmReader` and `VmWriter` is the only way to access the frame. 
+ unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) } + } +} + +impl VmIo for UntypedFrame { + fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { + let read_len = writer.avail().min(self.size().saturating_sub(offset)); + // Do bound check with potential integer overflow in mind + let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; + if max_offset > self.size() { + return Err(Error::InvalidArgs); + } + let len = self + .reader() + .skip(offset) + .read_fallible(writer) + .map_err(|(e, _)| e)?; + debug_assert!(len == read_len); + Ok(()) + } + + fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { + let write_len = reader.remain().min(self.size().saturating_sub(offset)); + // Do bound check with potential integer overflow in mind + let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?; + if max_offset > self.size() { + return Err(Error::InvalidArgs); + } + let len = self + .writer() + .skip(offset) + .write_fallible(reader) + .map_err(|(e, _)| e)?; + debug_assert!(len == write_len); + Ok(()) + } +} + +/// Metadata for a frame. +#[derive(Debug, Default)] +pub struct UntypedMeta {} + +impl_frame_meta_for!(UntypedMeta); + +// Here are implementations for `xarray`. + +use core::{marker::PhantomData, ops::Deref}; + +/// `FrameRef` is a struct that can work as `&'a UntypedFrame`. +/// +/// This is solely useful for [`crate::collections::xarray`]. +pub struct FrameRef<'a> { + inner: ManuallyDrop, + _marker: PhantomData<&'a UntypedFrame>, +} + +impl Deref for FrameRef<'_> { + type Target = UntypedFrame; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +// SAFETY: `UntypedFrame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer. +// The pointer is also aligned to 4. +unsafe impl xarray::ItemEntry for UntypedFrame { + type Ref<'a> + = FrameRef<'a> + where + Self: 'a; + + fn into_raw(self) -> *const () { + let ptr = self.page.ptr; + core::mem::forget(self); + ptr as *const () + } + + unsafe fn from_raw(raw: *const ()) -> Self { + Self { + page: Frame:: { + ptr: raw as *mut MetaSlot, + _marker: PhantomData, + }, + } + } + + unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> { + Self::Ref { + inner: ManuallyDrop::new(UntypedFrame::from_raw(raw)), + _marker: PhantomData, + } + } +} diff --git a/ostd/src/mm/frame/options.rs b/ostd/src/mm/frame/untyped/options.rs similarity index 81% rename from ostd/src/mm/frame/options.rs rename to ostd/src/mm/frame/untyped/options.rs index 6216575c4..966210fd8 100644 --- a/ostd/src/mm/frame/options.rs +++ b/ostd/src/mm/frame/untyped/options.rs @@ -2,9 +2,9 @@ //! Options for allocating frames -use super::{Frame, Segment}; +use super::{UntypedFrame, UntypedSegment}; use crate::{ - mm::{frame::FrameMeta, page, PAGE_SIZE}, + mm::{frame, frame::untyped::UntypedMeta, PAGE_SIZE}, prelude::*, Error, }; @@ -52,13 +52,13 @@ impl FrameAllocOptions { } /// Allocates a single page frame according to the given options. 
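The `VmIo` implementations above share one bounds-check pattern: clamp the requested length to the bytes the frame actually has past the offset, then guard the offset arithmetic against overflow before copying. Below is a standalone model of that check, with an illustrative `FRAME_SIZE` and error strings standing in for ostd's `Error` values.

```rust
const FRAME_SIZE: usize = 4096;

fn checked_copy_len(offset: usize, requested: usize) -> Result<usize, &'static str> {
    // Clamp to the bytes actually available past `offset`.
    let len = requested.min(FRAME_SIZE.saturating_sub(offset));
    // Guard the addition against overflow before the final bound check.
    let max_offset = offset.checked_add(len).ok_or("overflow")?;
    if max_offset > FRAME_SIZE {
        return Err("invalid args");
    }
    Ok(len)
}

fn main() {
    assert_eq!(checked_copy_len(0, 100), Ok(100));
    assert_eq!(checked_copy_len(4000, 200), Ok(96)); // clamped at the frame end
    assert_eq!(checked_copy_len(5000, 1), Err("invalid args")); // offset past the frame
}
```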
- pub fn alloc_single(&self) -> Result { + pub fn alloc_single(&self) -> Result { if self.nframes != 1 { return Err(Error::InvalidArgs); } - let page = page::allocator::alloc_single(FrameMeta::default()).ok_or(Error::NoMemory)?; - let frame = Frame { page }; + let page = frame::allocator::alloc_single(UntypedMeta::default()).ok_or(Error::NoMemory)?; + let frame = UntypedFrame { page }; if !self.uninit { frame.writer().fill(0); } @@ -68,17 +68,19 @@ impl FrameAllocOptions { /// Allocates a contiguous range of page frames according to the given options. /// - /// The returned [`Segment`] contains at least one page frame. - pub fn alloc_contiguous(&self) -> Result { + /// The returned [`UntypedSegment`] contains at least one page frame. + pub fn alloc_contiguous(&self) -> Result { // It's no use to checking `self.is_contiguous` here. if self.nframes == 0 { return Err(Error::InvalidArgs); } - let segment: Segment = - page::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| FrameMeta::default()) - .ok_or(Error::NoMemory)? - .into(); + let segment: UntypedSegment = + frame::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| { + UntypedMeta::default() + }) + .ok_or(Error::NoMemory)? + .into(); if !self.uninit { segment.writer().fill(0); } diff --git a/ostd/src/mm/frame/untyped/segment.rs b/ostd/src/mm/frame/untyped/segment.rs new file mode 100644 index 000000000..977f5035a --- /dev/null +++ b/ostd/src/mm/frame/untyped/segment.rs @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! A contiguous segment of untyped memory pages. + +use core::ops::Range; + +use crate::{ + mm::{ + frame::{untyped::UntypedMeta, Segment}, + io::{FallibleVmRead, FallibleVmWrite}, + HasPaddr, Infallible, Paddr, UntypedFrame, VmIo, VmReader, VmWriter, + }, + Error, Result, +}; + +/// A contiguous segment of untyped memory pages. +/// +/// A [`UntypedSegment`] object is a handle to a contiguous range of untyped memory +/// pages, and the underlying pages can be shared among multiple threads. +/// [`UntypedSegment::slice`] can be used to clone a slice of the segment (also can be +/// used to clone the entire range). Reference counts are maintained for each +/// page in the segment. So cloning the handle may not be cheap as it +/// increments the reference count of all the cloned pages. +/// +/// Other [`UntypedFrame`] handles can also refer to the pages in the segment. And +/// the segment can be iterated over to get all the frames in it. +/// +/// To allocate a segment, use [`crate::mm::FrameAllocator`]. +/// +/// # Example +/// +/// ```rust +/// let vm_segment = FrameAllocOptions::new(2) +/// .is_contiguous(true) +/// .alloc_contiguous()?; +/// vm_segment.write_bytes(0, buf)?; +/// ``` +#[derive(Debug)] +pub struct UntypedSegment { + pages: Segment, +} + +impl HasPaddr for UntypedSegment { + fn paddr(&self) -> Paddr { + self.pages.start_paddr() + } +} + +impl Clone for UntypedSegment { + fn clone(&self) -> Self { + Self { + pages: self.pages.clone(), + } + } +} + +impl UntypedSegment { + /// Returns the start physical address. + pub fn start_paddr(&self) -> Paddr { + self.pages.start_paddr() + } + + /// Returns the end physical address. + pub fn end_paddr(&self) -> Paddr { + self.pages.end_paddr() + } + + /// Returns the number of bytes in it. + pub fn nbytes(&self) -> usize { + self.pages.nbytes() + } + + /// Split the segment into two at the given byte offset from the start. + /// + /// The resulting segments cannot be empty. 
So the byte offset cannot be + /// neither zero nor the length of the segment. + /// + /// # Panics + /// + /// The function panics if the byte offset is out of bounds, at either ends, or + /// not base-page-aligned. + pub fn split(self, offset: usize) -> (Self, Self) { + let (left, right) = self.pages.split(offset); + (Self { pages: left }, Self { pages: right }) + } + + /// Get an extra handle to the segment in the byte range. + /// + /// The sliced byte range in indexed by the offset from the start of the + /// segment. The resulting segment holds extra reference counts. + /// + /// # Panics + /// + /// The function panics if the byte range is out of bounds, or if any of + /// the ends of the byte range is not base-page aligned. + pub fn slice(&self, range: &Range) -> Self { + Self { + pages: self.pages.slice(range), + } + } + + /// Gets a [`VmReader`] to read from the segment from the beginning to the end. + pub fn reader(&self) -> VmReader<'_, Infallible> { + let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8; + // SAFETY: + // - The memory range points to untyped memory. + // - The segment is alive during the lifetime `'a`. + // - Using `VmReader` and `VmWriter` is the only way to access the segment. + unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) } + } + + /// Gets a [`VmWriter`] to write to the segment from the beginning to the end. + pub fn writer(&self) -> VmWriter<'_, Infallible> { + let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8; + // SAFETY: + // - The memory range points to untyped memory. + // - The segment is alive during the lifetime `'a`. + // - Using `VmReader` and `VmWriter` is the only way to access the segment. + unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) } + } +} + +impl From for UntypedSegment { + fn from(frame: UntypedFrame) -> Self { + Self { + pages: Segment::from(frame.page), + } + } +} + +impl From> for UntypedSegment { + fn from(pages: Segment) -> Self { + Self { pages } + } +} + +impl VmIo for UntypedSegment { + fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { + let read_len = writer.avail(); + // Do bound check with potential integer overflow in mind + let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?; + if max_offset > self.nbytes() { + return Err(Error::InvalidArgs); + } + let len = self + .reader() + .skip(offset) + .read_fallible(writer) + .map_err(|(e, _)| e)?; + debug_assert!(len == read_len); + Ok(()) + } + + fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> { + let write_len = reader.remain(); + // Do bound check with potential integer overflow in mind + let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?; + if max_offset > self.nbytes() { + return Err(Error::InvalidArgs); + } + let len = self + .writer() + .skip(offset) + .write_fallible(reader) + .map_err(|(e, _)| e)?; + debug_assert!(len == write_len); + Ok(()) + } +} + +impl Iterator for UntypedSegment { + type Item = UntypedFrame; + + fn next(&mut self) -> Option { + self.pages.next().map(|page| UntypedFrame { page }) + } +} diff --git a/ostd/src/mm/heap_allocator/mod.rs b/ostd/src/mm/heap_allocator/mod.rs index 5a75892fc..c42beaa08 100644 --- a/ostd/src/mm/heap_allocator/mod.rs +++ b/ostd/src/mm/heap_allocator/mod.rs @@ -11,7 +11,7 @@ use spin::Once; use super::paddr_to_vaddr; use crate::{ - mm::{page::allocator::PAGE_ALLOCATOR, PAGE_SIZE}, + mm::{frame::allocator::PAGE_ALLOCATOR, PAGE_SIZE}, prelude::*, sync::SpinLock, trap::disable_local, diff --git 
a/ostd/src/mm/io.rs b/ostd/src/mm/io.rs index 05afed06c..dcd20525a 100644 --- a/ostd/src/mm/io.rs +++ b/ostd/src/mm/io.rs @@ -7,11 +7,11 @@ //! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and //! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_. //! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory -//! (e.g., `&[u8]`) or untyped memory (e.g, [`Frame`]). Behind the scene, `VmReader` and `VmWriter` +//! (e.g., `&[u8]`) or untyped memory (e.g, [`UntypedFrame`]). Behind the scene, `VmReader` and `VmWriter` //! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose //! safety depends on whether the given memory regions are _valid_ or not. //! -//! [`Frame`]: crate::mm::Frame +//! [`UntypedFrame`]: crate::mm::UntypedFrame //! [`from_user_space`]: `VmReader::from_user_space` //! [`from_kernel_space`]: `VmReader::from_kernel_space` //! @@ -58,7 +58,7 @@ use crate::{ }; /// A trait that enables reading/writing data from/to a VM object, -/// e.g., [`Segment`], [`Vec`] and [`Frame`]. +/// e.g., [`UntypedSegment`], [`Vec`] and [`UntypedFrame`]. /// /// # Concurrency /// @@ -67,8 +67,8 @@ use crate::{ /// desire predictability or atomicity, the users should add extra mechanism /// for such properties. /// -/// [`Segment`]: crate::mm::Segment -/// [`Frame`]: crate::mm::Frame +/// [`UntypedSegment`]: crate::mm::UntypedSegment +/// [`UntypedFrame`]: crate::mm::UntypedFrame pub trait VmIo: Send + Sync { /// Reads requested data at a specified offset into a given `VmWriter`. /// diff --git a/ostd/src/mm/kspace/kvirt_area.rs b/ostd/src/mm/kspace/kvirt_area.rs index 3ad794973..8e80b1252 100644 --- a/ostd/src/mm/kspace/kvirt_area.rs +++ b/ostd/src/mm/kspace/kvirt_area.rs @@ -11,7 +11,7 @@ use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE}; use crate::{ cpu::CpuSet, mm::{ - page::{meta::PageMeta, DynPage, Page}, + frame::{meta::FrameMeta, AnyFrame, Frame}, page_prop::PageProperty, page_table::PageTableItem, tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD}, @@ -204,10 +204,10 @@ impl KVirtArea { impl KVirtArea { /// Maps pages into the kernel virtual area. - pub fn map_pages( + pub fn map_pages( &mut self, range: Range, - pages: impl Iterator>, + pages: impl Iterator>, prop: PageProperty, ) { assert!(self.start() <= range.start && self.end() >= range.end); @@ -232,7 +232,7 @@ impl KVirtArea { /// /// This function returns None if the address is not mapped (`NotMapped`), /// while panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page. 
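Both the dynamically-typed frame handles returned by APIs like this one and the `TryFrom<AnyFrame>` conversions earlier rely on the frame metadata being stored behind a `dyn Any`-style vtable: a dynamic handle can be turned back into a typed one only if the stored metadata has the expected type. A minimal standalone model of that check follows; `UntypedMeta` and `KernelMeta` are illustrative stand-ins, not the ostd types.

```rust
use std::any::Any;

#[derive(Debug)]
struct UntypedMeta;
#[derive(Debug)]
struct KernelMeta;

// The typed conversion succeeds only when the stored metadata is of type `M`.
fn try_as<M: Any>(meta: &dyn Any) -> Option<&M> {
    meta.downcast_ref::<M>()
}

fn main() {
    let meta: Box<dyn Any> = Box::new(UntypedMeta);
    assert!(try_as::<UntypedMeta>(meta.as_ref()).is_some());
    assert!(try_as::<KernelMeta>(meta.as_ref()).is_none());
}
```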
- pub fn get_page(&self, addr: Vaddr) -> Option { + pub fn get_page(&self, addr: Vaddr) -> Option { let query_result = self.query_page(addr); match query_result { PageTableItem::Mapped { diff --git a/ostd/src/mm/kspace/mod.rs b/ostd/src/mm/kspace/mod.rs index 2a1a59b71..bc3e7d2a1 100644 --- a/ostd/src/mm/kspace/mod.rs +++ b/ostd/src/mm/kspace/mod.rs @@ -47,11 +47,11 @@ use log::info; use spin::Once; use super::{ - nr_subpage_per_huge, - page::{ - meta::{impl_page_meta, mapping, MetaPageMeta}, - ContPages, Page, + frame::{ + meta::{impl_frame_meta_for, mapping, MetaPageMeta}, + Frame, Segment, }, + nr_subpage_per_huge, page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags}, page_table::{KernelMode, PageTable}, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE, @@ -111,7 +111,7 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize { /// Returns whether the given address should be mapped as tracked. /// -/// About what is tracked mapping, see [`crate::mm::page::meta::MapTrackingStatus`]. +/// About what is tracked mapping, see [`crate::mm::frame::meta::MapTrackingStatus`]. pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool { !(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr)) } @@ -131,7 +131,7 @@ pub static KERNEL_PAGE_TABLE: Once) { +pub fn init_kernel_page_table(meta_pages: Segment) { info!("Initializing the kernel page table"); let regions = crate::boot::memory_regions(); @@ -214,7 +214,7 @@ pub fn init_kernel_page_table(meta_pages: ContPages) { }; let mut cursor = kpt.cursor_mut(&from).unwrap(); for frame_paddr in to.step_by(PAGE_SIZE) { - let page = Page::::from_unused(frame_paddr, KernelMeta::default()); + let page = Frame::::from_unused(frame_paddr, KernelMeta::default()); // SAFETY: we are doing mappings for the kernel. unsafe { let _old = cursor.map(page.into(), prop); @@ -251,4 +251,4 @@ pub unsafe fn activate_kernel_page_table() { #[derive(Debug, Default)] pub struct KernelMeta {} -impl_page_meta!(KernelMeta); +impl_frame_meta_for!(KernelMeta); diff --git a/ostd/src/mm/mod.rs b/ostd/src/mm/mod.rs index bd1f4ec9e..f6a61e55f 100644 --- a/ostd/src/mm/mod.rs +++ b/ostd/src/mm/mod.rs @@ -14,7 +14,6 @@ pub(crate) mod heap_allocator; mod io; pub(crate) mod kspace; mod offset; -pub(crate) mod page; pub(crate) mod page_prop; pub(crate) mod page_table; pub mod stat; @@ -25,7 +24,7 @@ use core::{fmt::Debug, ops::Range}; pub use self::{ dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr}, - frame::{options::FrameAllocOptions, Frame, Segment}, + frame::untyped::{options::FrameAllocOptions, UntypedFrame, UntypedSegment}, io::{ Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader, VmWriter, @@ -34,7 +33,7 @@ pub use self::{ vm_space::VmSpace, }; pub(crate) use self::{ - kspace::paddr_to_vaddr, page::meta::init as init_page_meta, page_prop::PrivilegedPageFlags, + frame::meta::init as init_page_meta, kspace::paddr_to_vaddr, page_prop::PrivilegedPageFlags, page_table::PageTable, }; use crate::arch::mm::PagingConsts; diff --git a/ostd/src/mm/page/cont_pages.rs b/ostd/src/mm/page/cont_pages.rs deleted file mode 100644 index a6a95ce8e..000000000 --- a/ostd/src/mm/page/cont_pages.rs +++ /dev/null @@ -1,191 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//! A contiguous range of pages. - -use alloc::vec::Vec; -use core::{mem::ManuallyDrop, ops::Range}; - -use super::{inc_page_ref_count, meta::PageMeta, Page}; -use crate::mm::{Paddr, PAGE_SIZE}; - -/// A contiguous range of physical memory pages. 
-/// -/// This is a handle to many contiguous pages. It will be more lightweight -/// than owning an array of page handles. -/// -/// The ownership is achieved by the reference counting mechanism of pages. -/// When constructing a `ContPages`, the page handles are created then -/// forgotten, leaving the reference count. When dropping a it, the page -/// handles are restored and dropped, decrementing the reference count. -#[derive(Debug)] -pub struct ContPages { - range: Range, - _marker: core::marker::PhantomData, -} - -impl Drop for ContPages { - fn drop(&mut self) { - for paddr in self.range.clone().step_by(PAGE_SIZE) { - // SAFETY: for each page there would be a forgotten handle - // when creating the `ContPages` object. - drop(unsafe { Page::::from_raw(paddr) }); - } - } -} - -impl Clone for ContPages { - fn clone(&self) -> Self { - for paddr in self.range.clone().step_by(PAGE_SIZE) { - // SAFETY: for each page there would be a forgotten handle - // when creating the `ContPages` object, so we already have - // reference counts for the pages. - unsafe { inc_page_ref_count(paddr) }; - } - Self { - range: self.range.clone(), - _marker: core::marker::PhantomData, - } - } -} - -impl ContPages { - /// Creates a new `ContPages` from unused pages. - /// - /// The caller must provide a closure to initialize metadata for all the pages. - /// The closure receives the physical address of the page and returns the - /// metadata, which is similar to [`core::array::from_fn`]. - /// - /// # Panics - /// - /// The function panics if: - /// - the physical address is invalid or not aligned; - /// - any of the pages are already in use. - pub fn from_unused(range: Range, mut metadata_fn: F) -> Self - where - F: FnMut(Paddr) -> M, - { - for paddr in range.clone().step_by(PAGE_SIZE) { - let _ = ManuallyDrop::new(Page::::from_unused(paddr, metadata_fn(paddr))); - } - Self { - range, - _marker: core::marker::PhantomData, - } - } - - /// Gets the start physical address of the contiguous pages. - pub fn start_paddr(&self) -> Paddr { - self.range.start - } - - /// Gets the end physical address of the contiguous pages. - pub fn end_paddr(&self) -> Paddr { - self.range.end - } - - /// Gets the length in bytes of the contiguous pages. - pub fn nbytes(&self) -> usize { - self.range.end - self.range.start - } - - /// Splits the pages into two at the given byte offset from the start. - /// - /// The resulting pages cannot be empty. So the offset cannot be neither - /// zero nor the length of the pages. - /// - /// # Panics - /// - /// The function panics if the offset is out of bounds, at either ends, or - /// not base-page-aligned. - pub fn split(self, offset: usize) -> (Self, Self) { - assert!(offset % PAGE_SIZE == 0); - assert!(0 < offset && offset < self.nbytes()); - - let old = ManuallyDrop::new(self); - let at = old.range.start + offset; - - ( - Self { - range: old.range.start..at, - _marker: core::marker::PhantomData, - }, - Self { - range: at..old.range.end, - _marker: core::marker::PhantomData, - }, - ) - } - - /// Gets an extra handle to the pages in the byte offset range. - /// - /// The sliced byte offset range in indexed by the offset from the start of - /// the contiguous pages. The resulting pages holds extra reference counts. - /// - /// # Panics - /// - /// The function panics if the byte offset range is out of bounds, or if - /// any of the ends of the byte offset range is not base-page aligned. 
- pub fn slice(&self, range: &Range) -> Self { - assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0); - let start = self.range.start + range.start; - let end = self.range.start + range.end; - assert!(start <= end && end <= self.range.end); - - for paddr in (start..end).step_by(PAGE_SIZE) { - // SAFETY: We already have reference counts for the pages since - // for each page there would be a forgotten handle when creating - // the `ContPages` object. - unsafe { inc_page_ref_count(paddr) }; - } - - Self { - range: start..end, - _marker: core::marker::PhantomData, - } - } -} - -impl From> for ContPages { - fn from(page: Page) -> Self { - let pa = page.paddr(); - let _ = ManuallyDrop::new(page); - Self { - range: pa..pa + PAGE_SIZE, - _marker: core::marker::PhantomData, - } - } -} - -impl From> for Vec> { - fn from(pages: ContPages) -> Self { - let vector = pages - .range - .clone() - .step_by(PAGE_SIZE) - .map(|i| - // SAFETY: for each page there would be a forgotten handle - // when creating the `ContPages` object. - unsafe { Page::::from_raw(i) }) - .collect(); - let _ = ManuallyDrop::new(pages); - vector - } -} - -impl Iterator for ContPages { - type Item = Page; - - fn next(&mut self) -> Option { - if self.range.start < self.range.end { - // SAFETY: each page in the range would be a handle forgotten - // when creating the `ContPages` object. - let page = unsafe { Page::::from_raw(self.range.start) }; - self.range.start += PAGE_SIZE; - // The end cannot be non-page-aligned. - debug_assert!(self.range.start <= self.range.end); - Some(page) - } else { - None - } - } -} diff --git a/ostd/src/mm/page/mod.rs b/ostd/src/mm/page/mod.rs deleted file mode 100644 index 0ab6374e9..000000000 --- a/ostd/src/mm/page/mod.rs +++ /dev/null @@ -1,379 +0,0 @@ -// SPDX-License-Identifier: MPL-2.0 - -//! Physical memory page management. -//! -//! A page is an aligned, contiguous range of bytes in physical memory. The sizes -//! of base pages and huge pages are architecture-dependent. A page can be mapped -//! to a virtual address using the page table. -//! -//! Pages can be accessed through page handles, namely, [`Page`]. A page handle -//! is a reference-counted handle to a page. When all handles to a page are dropped, -//! the page is released and can be reused. -//! -//! Pages can have dedicated metadata, which is implemented in the [`meta`] module. -//! The reference count and usage of a page are stored in the metadata as well, leaving -//! the handle only a pointer to the metadata. - -pub mod allocator; -mod cont_pages; -pub mod meta; - -use core::{ - any::Any, - marker::PhantomData, - mem::ManuallyDrop, - sync::atomic::{AtomicUsize, Ordering}, -}; - -pub use cont_pages::ContPages; -use meta::{ - mapping, MetaSlot, PageMeta, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED, -}; - -use super::{frame::FrameMeta, Frame, PagingLevel, PAGE_SIZE}; -use crate::mm::{Paddr, PagingConsts, Vaddr}; - -static MAX_PADDR: AtomicUsize = AtomicUsize::new(0); - -/// A page with a statically-known usage, whose metadata is represented by `M`. -#[derive(Debug)] -pub struct Page { - pub(super) ptr: *const MetaSlot, - pub(super) _marker: PhantomData, -} - -unsafe impl Send for Page {} - -unsafe impl Sync for Page {} - -impl Page { - /// Get a `Page` handle with a specific usage from a raw, unused page. - /// - /// The caller should provide the initial metadata of the page. 
- /// - /// # Panics - /// - /// The function panics if: - /// - the physical address is out of bound or not aligned; - /// - the page is already in use. - pub fn from_unused(paddr: Paddr, metadata: M) -> Self { - assert!(paddr % PAGE_SIZE == 0); - assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr); - - // Checking unsafe preconditions of the `PageMeta` trait. - debug_assert!(size_of::() <= PAGE_METADATA_MAX_SIZE); - debug_assert!(align_of::() <= PAGE_METADATA_MAX_ALIGN); - - let vaddr = mapping::page_to_meta::(paddr); - let ptr = vaddr as *const MetaSlot; - - // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an - // immutable reference to it is always safe. - let slot = unsafe { &*ptr }; - - // `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata - // initialization won't be reordered before this memory compare-and-exchange. - slot.ref_count - .compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed) - .expect("Page already in use when trying to get a new handle"); - - // SAFETY: We have exclusive access to the page metadata. - let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() }; - vtable_ptr.write(core::ptr::metadata(&metadata as &dyn PageMeta)); - - // SAFETY: - // 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the - // metadata storage. - // 2. The size and the alignment of the metadata storage is large enough to hold `M` - // (guaranteed by the safety requirement of the `PageMeta` trait). - // 3. We have exclusive access to the metadata storage (guaranteed by the reference count). - unsafe { ptr.cast::().cast_mut().write(metadata) }; - - // Assuming no one can create a `Page` instance directly from the page address, `Relaxed` - // is fine here. Otherwise, we should use `Release` to ensure that the metadata - // initialization won't be reordered after this memory store. - slot.ref_count.store(1, Ordering::Relaxed); - - Self { - ptr, - _marker: PhantomData, - } - } - - /// Forget the handle to the page. - /// - /// This will result in the page being leaked without calling the custom dropper. - /// - /// A physical address to the page is returned in case the page needs to be - /// restored using [`Page::from_raw`] later. This is useful when some architectural - /// data structures need to hold the page handle such as the page table. - #[allow(unused)] - pub(in crate::mm) fn into_raw(self) -> Paddr { - let paddr = self.paddr(); - core::mem::forget(self); - paddr - } - - /// Restore a forgotten `Page` from a physical address. - /// - /// # Safety - /// - /// The caller should only restore a `Page` that was previously forgotten using - /// [`Page::into_raw`]. - /// - /// And the restoring operation should only be done once for a forgotten - /// `Page`. Otherwise double-free will happen. - /// - /// Also, the caller ensures that the usage of the page is correct. There's - /// no checking of the usage in this function. - pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self { - let vaddr = mapping::page_to_meta::(paddr); - let ptr = vaddr as *const MetaSlot; - - Self { - ptr, - _marker: PhantomData, - } - } - - /// Get the physical address. - pub fn paddr(&self) -> Paddr { - mapping::meta_to_page::(self.ptr as Vaddr) - } - - /// Get the paging level of this page. - /// - /// This is the level of the page table entry that maps the frame, - /// which determines the size of the frame. 
- /// - /// Currently, the level is always 1, which means the frame is a regular - /// page frame. - pub const fn level(&self) -> PagingLevel { - 1 - } - - /// Size of this page in bytes. - pub const fn size(&self) -> usize { - PAGE_SIZE - } - - /// Get the metadata of this page. - pub fn meta(&self) -> &M { - // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably - // borrowed as `M` because the type is correct, it lives under the given lifetime, and no - // one will mutably borrow the page metadata after initialization. - unsafe { &*self.ptr.cast() } - } - - /// Get the reference count of the page. - /// - /// It returns the number of all references to the page, including all the - /// existing page handles ([`Page`], [`DynPage`]), and all the mappings in the - /// page table that points to the page. - /// - /// # Safety - /// - /// The function is safe to call, but using it requires extra care. The - /// reference count can be changed by other threads at any time including - /// potentially between calling this method and acting on the result. - pub fn reference_count(&self) -> u32 { - self.slot().ref_count.load(Ordering::Relaxed) - } - - fn slot(&self) -> &MetaSlot { - // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an - // immutable reference to it is always safe. - unsafe { &*self.ptr } - } -} - -impl Clone for Page { - fn clone(&self) -> Self { - // SAFETY: We have already held a reference to the page. - unsafe { self.slot().inc_ref_count() }; - - Self { - ptr: self.ptr, - _marker: PhantomData, - } - } -} - -impl Drop for Page { - fn drop(&mut self) { - let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release); - debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED); - - if last_ref_cnt == 1 { - // A fence is needed here with the same reasons stated in the implementation of - // `Arc::drop`: . - core::sync::atomic::fence(Ordering::Acquire); - - // SAFETY: this is the last reference and is about to be dropped. - unsafe { - meta::drop_last_in_place(self.ptr as *mut MetaSlot); - } - } - } -} - -/// A page with a dynamically-known usage. -/// -/// It can also be used when the user don't care about the usage of the page. -#[derive(Debug)] -pub struct DynPage { - ptr: *const MetaSlot, -} - -unsafe impl Send for DynPage {} -unsafe impl Sync for DynPage {} - -impl DynPage { - /// Forget the handle to the page. - /// - /// This is the same as [`Page::into_raw`]. - /// - /// This will result in the page being leaked without calling the custom dropper. - /// - /// A physical address to the page is returned in case the page needs to be - /// restored using [`Self::from_raw`] later. - pub(in crate::mm) fn into_raw(self) -> Paddr { - let paddr = self.paddr(); - core::mem::forget(self); - paddr - } - - /// Restore a forgotten page from a physical address. - /// - /// # Safety - /// - /// The safety concerns are the same as [`Page::from_raw`]. - pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self { - let vaddr = mapping::page_to_meta::(paddr); - let ptr = vaddr as *const MetaSlot; - - Self { ptr } - } - - /// Get the metadata of this page. - pub fn meta(&self) -> &dyn Any { - let slot = self.slot(); - - // SAFETY: The page metadata is valid to be borrowed immutably, since it will never be - // borrowed mutably after initialization. - let vtable_ptr = unsafe { &*slot.vtable_ptr.get() }; - - // SAFETY: The page metadata is initialized and valid. 
- let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() }; - - let meta_ptr: *const dyn PageMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr); - - // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably - // borrowed under `vtable_ptr` because the vtable is correct, it lives under the given - // lifetime, and no one will mutably borrow the page metadata after initialization. - (unsafe { &*meta_ptr }) as &dyn Any - } - - /// Get the physical address of the start of the page - pub fn paddr(&self) -> Paddr { - mapping::meta_to_page::(self.ptr as Vaddr) - } - - /// Get the paging level of this page. - pub fn level(&self) -> PagingLevel { - 1 - } - - /// Size of this page in bytes. - pub fn size(&self) -> usize { - PAGE_SIZE - } - - fn slot(&self) -> &MetaSlot { - // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an - // immutable reference to it is always safe. - unsafe { &*self.ptr } - } -} - -impl TryFrom for Page { - type Error = DynPage; - - /// Try converting a [`DynPage`] into the statically-typed [`Page`]. - /// - /// If the usage of the page is not the same as the expected usage, it will - /// return the dynamic page itself as is. - fn try_from(dyn_page: DynPage) -> Result { - if dyn_page.meta().is::() { - let result = Page { - ptr: dyn_page.ptr, - _marker: PhantomData, - }; - let _ = ManuallyDrop::new(dyn_page); - Ok(result) - } else { - Err(dyn_page) - } - } -} - -impl From> for DynPage { - fn from(page: Page) -> Self { - let result = Self { ptr: page.ptr }; - let _ = ManuallyDrop::new(page); - result - } -} - -impl From for DynPage { - fn from(frame: Frame) -> Self { - Page::::from(frame).into() - } -} - -impl Clone for DynPage { - fn clone(&self) -> Self { - // SAFETY: We have already held a reference to the page. - unsafe { self.slot().inc_ref_count() }; - - Self { ptr: self.ptr } - } -} - -impl Drop for DynPage { - fn drop(&mut self) { - let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release); - debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED); - - if last_ref_cnt == 1 { - // A fence is needed here with the same reasons stated in the implementation of - // `Arc::drop`: . - core::sync::atomic::fence(Ordering::Acquire); - - // SAFETY: this is the last reference and is about to be dropped. - unsafe { - meta::drop_last_in_place(self.ptr as *mut MetaSlot); - } - } - } -} - -/// Increases the reference count of the page by one. -/// -/// # Safety -/// -/// The caller should ensure the following conditions: -/// 1. The physical address must represent a valid page; -/// 2. The caller must have already held a reference to the page. -pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) { - debug_assert!(paddr % PAGE_SIZE == 0); - debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr); - - let vaddr: Vaddr = mapping::page_to_meta::(paddr); - // SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking - // an immutable reference to it is always safe. - let slot = unsafe { &*(vaddr as *const MetaSlot) }; - - // SAFETY: We have already held a reference to the page. 
- unsafe { slot.inc_ref_count() }; -} diff --git a/ostd/src/mm/page_table/boot_pt.rs b/ostd/src/mm/page_table/boot_pt.rs index 539e55fe1..c26cca65b 100644 --- a/ostd/src/mm/page_table/boot_pt.rs +++ b/ostd/src/mm/page_table/boot_pt.rs @@ -16,7 +16,7 @@ use crate::{ cpu::num_cpus, cpu_local_cell, mm::{ - nr_subpage_per_huge, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, Paddr, PageProperty, + frame::allocator::PAGE_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageProperty, PagingConstsTrait, Vaddr, PAGE_SIZE, }, sync::SpinLock, diff --git a/ostd/src/mm/page_table/cursor.rs b/ostd/src/mm/page_table/cursor.rs index 24109b9f3..79d74b362 100644 --- a/ostd/src/mm/page_table/cursor.rs +++ b/ostd/src/mm/page_table/cursor.rs @@ -76,7 +76,7 @@ use super::{ }; use crate::{ mm::{ - kspace::should_map_as_tracked, paddr_to_vaddr, page::DynPage, Paddr, PageProperty, Vaddr, + frame::AnyFrame, kspace::should_map_as_tracked, paddr_to_vaddr, Paddr, PageProperty, Vaddr, }, task::{disable_preempt, DisabledPreemptGuard}, }; @@ -89,7 +89,7 @@ pub enum PageTableItem { }, Mapped { va: Vaddr, - page: DynPage, + page: AnyFrame, prop: PageProperty, }, #[allow(dead_code)] @@ -231,7 +231,7 @@ where len: page_size::(level), }); } - Child::Page(page, prop) => { + Child::Frame(page, prop) => { return Ok(PageTableItem::Mapped { va, page, prop }); } Child::Untracked(pa, plevel, prop) => { @@ -400,9 +400,9 @@ where self.0.query() } - /// Maps the range starting from the current address to a [`DynPage`]. + /// Maps the range starting from the current address to a [`AnyFrame`]. /// - /// It returns the previously mapped [`DynPage`] if that exists. + /// It returns the previously mapped [`AnyFrame`] if that exists. /// /// # Panics /// @@ -415,7 +415,7 @@ where /// /// The caller should ensure that the virtual range being mapped does /// not affect kernel's memory safety. - pub unsafe fn map(&mut self, page: DynPage, prop: PageProperty) -> Option { + pub unsafe fn map(&mut self, page: AnyFrame, prop: PageProperty) -> Option { let end = self.0.va + page.size(); assert!(end <= self.0.barrier_va.end); @@ -437,7 +437,7 @@ where let _ = cur_entry.replace(Child::PageTable(pt.clone_raw())); self.0.push_level(pt); } - Child::Page(_, _) => { + Child::Frame(_, _) => { panic!("Mapping a smaller page in an already mapped huge page"); } Child::Untracked(_, _, _) => { @@ -449,11 +449,11 @@ where debug_assert_eq!(self.0.level, page.level()); // Map the current page. 
diff --git a/ostd/src/mm/page_table/node/child.rs b/ostd/src/mm/page_table/node/child.rs
index af8238e29..88cec5a07 100644
--- a/ostd/src/mm/page_table/node/child.rs
+++ b/ostd/src/mm/page_table/node/child.rs
@@ -8,7 +8,7 @@ use super::{MapTrackingStatus, PageTableEntryTrait, RawPageTableNode};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        page::{inc_page_ref_count, DynPage},
+        frame::{inc_page_ref_count, AnyFrame},
         page_prop::PageProperty,
         Paddr, PagingConstsTrait, PagingLevel,
     },
@@ -27,7 +27,7 @@ pub(in crate::mm) enum Child<
     [(); C::NR_LEVELS as usize]:,
 {
     PageTable(RawPageTableNode<E, C>),
-    Page(DynPage, PageProperty),
+    Frame(AnyFrame, PageProperty),
     /// Pages not tracked by handles.
     Untracked(Paddr, PagingLevel, PageProperty),
     None,
@@ -53,7 +53,7 @@ where
     ) -> bool {
         match self {
             Child::PageTable(pt) => node_level == pt.level() + 1,
-            Child::Page(p, _) => {
+            Child::Frame(p, _) => {
                 node_level == p.level() && is_tracked == MapTrackingStatus::Tracked
             }
             Child::Untracked(_, level, _) => {
@@ -78,7 +78,7 @@ where
                 let pt = ManuallyDrop::new(pt);
                 E::new_pt(pt.paddr())
             }
-            Child::Page(page, prop) => {
+            Child::Frame(page, prop) => {
                 let level = page.level();
                 E::new_page(page.into_raw(), level, prop)
             }
@@ -119,8 +119,8 @@ where
         match is_tracked {
             MapTrackingStatus::Tracked => {
                 // SAFETY: The physical address points to a valid page.
-                let page = unsafe { DynPage::from_raw(paddr) };
-                Child::Page(page, pte.prop())
+                let page = unsafe { AnyFrame::from_raw(paddr) };
+                Child::Frame(page, pte.prop())
             }
             MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
             MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
@@ -162,8 +162,8 @@ where
                 // the reference to the page.
                 unsafe { inc_page_ref_count(paddr) };
                 // SAFETY: The physical address points to a valid page.
-                let page = unsafe { DynPage::from_raw(paddr) };
-                Child::Page(page, pte.prop())
+                let page = unsafe { AnyFrame::from_raw(paddr) };
+                Child::Frame(page, pte.prop())
             }
             MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
             MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
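These hunks encode the ownership rule for tracked mappings: a frame handle is turned into a bare physical address when it enters a PTE and is re-created when the PTE is decoded. A sketch of that round trip under the new names (illustrative; `into_raw` and `from_raw` appear in the hunks above, the function itself is hypothetical):

    fn stash_and_restore(frame: AnyFrame) -> AnyFrame {
        // Forget the handle but keep the reference count held, as when the
        // frame's address is written into a page-table entry.
        let paddr: Paddr = frame.into_raw();
        // SAFETY: `paddr` came from `into_raw` above and is restored exactly once.
        unsafe { AnyFrame::from_raw(paddr) }
    }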
diff --git a/ostd/src/mm/page_table/node/mod.rs b/ostd/src/mm/page_table/node/mod.rs
index 27be8a5e6..b6766edc5 100644
--- a/ostd/src/mm/page_table/node/mod.rs
+++ b/ostd/src/mm/page_table/node/mod.rs
@@ -40,9 +40,8 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait};
 use crate::{
     arch::mm::{PageTableEntry, PagingConsts},
     mm::{
-        paddr_to_vaddr,
-        page::{self, inc_page_ref_count, meta::PageMeta, DynPage, Page},
-        Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
+        frame::{self, inc_page_ref_count, meta::FrameMeta, AnyFrame, Frame},
+        paddr_to_vaddr, Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
     },
 };
 
@@ -79,7 +78,7 @@ where
     /// Converts a raw handle to an accessible handle by pertaining the lock.
     pub(super) fn lock(self) -> PageTableNode<E, C> {
         let level = self.level;
-        let page: Page<PageTablePageMeta<E, C>> = self.into();
+        let page: Frame<PageTablePageMeta<E, C>> = self.into();
 
         // Acquire the lock.
         let meta = page.meta();
@@ -187,7 +186,7 @@
 }
 
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> From<RawPageTableNode<E, C>>
-    for Page<PageTablePageMeta<E, C>>
+    for Frame<PageTablePageMeta<E, C>>
 where
     [(); C::NR_LEVELS as usize]:,
 {
@@ -196,7 +195,7 @@ where
         // SAFETY: The physical address in the raw handle is valid and we are
         // transferring the ownership to a new handle. No increment of the reference
        // count is needed.
-        unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(raw.paddr()) }
+        unsafe { Frame::<PageTablePageMeta<E, C>>::from_raw(raw.paddr()) }
     }
 }
 
@@ -207,7 +206,7 @@ where
     fn drop(&mut self) {
         // SAFETY: The physical address in the raw handle is valid. The restored
         // handle is dropped to decrement the reference count.
-        drop(unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) });
+        drop(unsafe { Frame::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) });
     }
 }
 
@@ -225,7 +224,7 @@ pub(super) struct PageTableNode<
 > where
     [(); C::NR_LEVELS as usize]:,
 {
-    page: Page<PageTablePageMeta<E, C>>,
+    page: Frame<PageTablePageMeta<E, C>>,
 }
 
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
 where
     [(); C::NR_LEVELS as usize]:,
 {
@@ -261,7 +260,7 @@
     /// extra unnecessary expensive operation.
     pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
         let meta = PageTablePageMeta::new_locked(level, is_tracked);
-        let page = page::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
+        let page = frame::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
 
         // Zero out the page table node.
         let ptr = paddr_to_vaddr(page.paddr()) as *mut u8;
@@ -407,7 +406,7 @@ where
 
 // SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for
 // all possible generic parameters. And the layout fits the requirements.
-unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageMeta for PageTablePageMeta<E, C>
+unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> FrameMeta for PageTablePageMeta<E, C>
 where
     [(); C::NR_LEVELS as usize]:,
 {
@@ -439,11 +438,11 @@ where
             if !pte.is_last(level) {
                 // SAFETY: The PTE points to a page table node. The ownership
                 // of the child is transferred to the child then dropped.
-                drop(unsafe { Page::<Self>::from_raw(paddr) });
+                drop(unsafe { Frame::<Self>::from_raw(paddr) });
             } else if is_tracked == MapTrackingStatus::Tracked {
                 // SAFETY: The PTE points to a tracked page. The ownership
                 // of the child is transferred to the child then dropped.
-                drop(unsafe { DynPage::from_raw(paddr) });
+                drop(unsafe { AnyFrame::from_raw(paddr) });
             }
         }
     }
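The page-table node's metadata now implements the renamed `FrameMeta` trait and is allocated through `mm::frame::allocator`. For metadata types that need no custom drop behavior, the macro this patch renames to `impl_frame_meta_for!` (used in the kernel-stack hunk further below) is enough; a hypothetical sketch:

    // Illustrative only: `ExampleMeta` is not part of this patch.
    #[derive(Debug, Default)]
    struct ExampleMeta {}
    impl_frame_meta_for!(ExampleMeta);

`PageTablePageMeta` keeps a hand-written `unsafe impl` instead because, as the last hunk above shows, it must walk the node and release child frames when the node is dropped.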
diff --git a/ostd/src/mm/page_table/test.rs b/ostd/src/mm/page_table/test.rs
index 4de0ea862..ecf2ae0c7 100644
--- a/ostd/src/mm/page_table/test.rs
+++ b/ostd/src/mm/page_table/test.rs
@@ -5,9 +5,8 @@ use core::mem::ManuallyDrop;
 use super::*;
 use crate::{
     mm::{
-        frame::FrameMeta,
+        frame::{allocator, untyped::UntypedMeta},
         kspace::LINEAR_MAPPING_BASE_VADDR,
-        page::allocator,
         page_prop::{CachePolicy, PageFlags},
         MAX_USERSPACE_VADDR,
     },
@@ -32,7 +31,7 @@ fn test_tracked_map_unmap() {
     let pt = PageTable::<UserMode>::empty();
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let page = allocator::alloc_single(FrameMeta::default()).unwrap();
+    let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
     let start_paddr = page.paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
@@ -88,7 +87,7 @@ fn test_user_copy_on_write() {
     let pt = PageTable::<UserMode>::empty();
     let from = PAGE_SIZE..PAGE_SIZE * 2;
-    let page = allocator::alloc_single(FrameMeta::default()).unwrap();
+    let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
     let start_paddr = page.paddr();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
@@ -173,7 +172,7 @@ fn test_base_protect_query() {
     let from_ppn = 1..1000;
     let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
-    let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| FrameMeta::default()).unwrap();
+    let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| UntypedMeta::default()).unwrap();
     let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
     unsafe {
         let mut cursor = pt.cursor_mut(&from).unwrap();
diff --git a/ostd/src/mm/stat/mod.rs b/ostd/src/mm/stat/mod.rs
index 47ef1738d..afc8105b9 100644
--- a/ostd/src/mm/stat/mod.rs
+++ b/ostd/src/mm/stat/mod.rs
@@ -2,7 +2,7 @@
 
 //! APIs for memory statistics.
 
-use crate::mm::page::allocator::PAGE_ALLOCATOR;
+use crate::mm::frame::allocator::PAGE_ALLOCATOR;
 
 /// Total memory available for any usages in the system (in bytes).
 ///
diff --git a/ostd/src/mm/tlb.rs b/ostd/src/mm/tlb.rs
index b50d7d0bb..545e9471a 100644
--- a/ostd/src/mm/tlb.rs
+++ b/ostd/src/mm/tlb.rs
@@ -5,7 +5,7 @@
 use alloc::vec::Vec;
 use core::ops::Range;
 
-use super::{page::DynPage, Vaddr, PAGE_SIZE};
+use super::{frame::AnyFrame, Vaddr, PAGE_SIZE};
 use crate::{
     cpu::{CpuSet, PinCurrentCpu},
     cpu_local,
@@ -77,7 +77,7 @@ impl TlbFlusher {
     /// flushed. Otherwise if the page is recycled for other purposes, the user
     /// space program can still access the page through the TLB entries. This
     /// method is designed to be used in such cases.
-    pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: DynPage) {
+    pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: AnyFrame) {
         self.issue_tlb_flush_(op, Some(drop_after_flush));
     }
 
@@ -91,7 +91,7 @@ impl TlbFlusher {
         self.need_self_flush
     }
 
-    fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option<DynPage>) {
+    fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option<AnyFrame>) {
         let op = op.optimize_for_large_range();
 
         // Fast path for single CPU cases.
@@ -156,7 +156,7 @@ impl TlbFlushOp {
 // Lock ordering: lock FLUSH_OPS before PAGE_KEEPER.
 cpu_local! {
     static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
-    static PAGE_KEEPER: SpinLock<Vec<DynPage>, LocalIrqDisabled> = SpinLock::new(Vec::new());
+    static PAGE_KEEPER: SpinLock<Vec<AnyFrame>, LocalIrqDisabled> = SpinLock::new(Vec::new());
 }
 
 fn do_remote_flush() {
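The TLB flusher now parks `AnyFrame` handles instead of `DynPage` ones until the flush is performed. A sketch of the call site this enables (illustrative; only `issue_tlb_flush_with` is taken from the hunk above, `flusher`, `va`, and `old_frame` stand for values the caller already owns, and the `TlbFlushOp::Address(va)` variant is assumed from context, not shown in this patch):

    // Defer dropping the just-unmapped frame until the TLB entry is gone, so
    // user space cannot keep touching a recycled page through a stale entry.
    flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), old_frame);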
diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs
index 482ce0a52..1c330fbfe 100644
--- a/ostd/src/mm/vm_space.rs
+++ b/ostd/src/mm/vm_space.rs
@@ -22,7 +22,7 @@ use crate::{
         kspace::KERNEL_PAGE_TABLE,
         page_table::{self, PageTable, PageTableItem, UserMode},
         tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
-        Frame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR,
+        PageProperty, UntypedFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
     },
     prelude::*,
     sync::{PreemptDisabled, RwLock, RwLockReadGuard},
@@ -40,7 +40,7 @@ use crate::{
 ///
 /// A newly-created `VmSpace` is not backed by any physical memory pages. To
 /// provide memory pages for a `VmSpace`, one can allocate and map physical
-/// memory ([`Frame`]s) to the `VmSpace` using the cursor.
+/// memory ([`UntypedFrame`]s) to the `VmSpace` using the cursor.
 ///
 /// A `VmSpace` can also attach a page fault handler, which will be invoked to
 /// handle page faults generated from user space.
@@ -323,7 +323,7 @@ impl CursorMut<'_, '_> {
     /// Map a frame into the current slot.
     ///
     /// This method will bring the cursor to the next slot after the modification.
-    pub fn map(&mut self, frame: Frame, prop: PageProperty) {
+    pub fn map(&mut self, frame: UntypedFrame, prop: PageProperty) {
         let start_va = self.virt_addr();
         // SAFETY: It is safe to map untyped memory into the userspace.
         let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
@@ -475,7 +475,7 @@ pub enum VmItem {
         /// The virtual address of the slot.
         va: Vaddr,
         /// The mapped frame.
-        frame: Frame,
+        frame: UntypedFrame,
         /// The property of the slot.
         prop: PageProperty,
     },
diff --git a/ostd/src/task/kernel_stack.rs b/ostd/src/task/kernel_stack.rs
index 7efd19cfe..b25f2adb8 100644
--- a/ostd/src/task/kernel_stack.rs
+++ b/ostd/src/task/kernel_stack.rs
@@ -1,10 +1,10 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use crate::{
-    impl_page_meta,
+    impl_frame_meta_for,
     mm::{
+        frame::allocator,
         kspace::kvirt_area::{KVirtArea, Tracked},
-        page::allocator,
         page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
         PAGE_SIZE,
     },
@@ -38,7 +38,7 @@ pub struct KernelStack {
 #[derive(Debug, Default)]
 struct KernelStackMeta {}
 
-impl_page_meta!(KernelStackMeta);
+impl_frame_meta_for!(KernelStackMeta);
 
 impl KernelStack {
     /// Generates a kernel stack with guard pages.