Polish the doc and rename variables in ostd::mm

Author: Zhang Junyang
Date: 2024-12-25 22:53:24 +08:00
Committer: Tate, Hongliang Tian
Parent: f332797084
Commit: 983a6af3cc
41 changed files with 430 additions and 414 deletions

View File

@@ -5,7 +5,7 @@ use bitvec::array::BitArray;
 use int_to_c_enum::TryFromInt;
 use ostd::{
     mm::{
-        DmaDirection, DmaStream, DmaStreamSlice, DynUSegment, FrameAllocOptions, Infallible, VmIo,
+        DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, USegment, VmIo,
         VmReader, VmWriter,
     },
     sync::{SpinLock, WaitQueue},
@@ -442,8 +442,8 @@ impl<'a> BioSegment {
         }
     }

-    /// Constructs a new `BioSegment` with a given `DynUSegment` and the bio direction.
-    pub fn new_from_segment(segment: DynUSegment, direction: BioDirection) -> Self {
+    /// Constructs a new `BioSegment` with a given `USegment` and the bio direction.
+    pub fn new_from_segment(segment: USegment, direction: BioDirection) -> Self {
         let len = segment.size();
         let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
         Self {
@@ -481,7 +481,7 @@ impl<'a> BioSegment {
     /// Returns the inner VM segment.
     #[cfg(ktest)]
-    pub fn inner_segment(&self) -> &DynUSegment {
+    pub fn inner_segment(&self) -> &USegment {
         self.inner.dma_slice.stream().segment()
     }
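A minimal usage sketch of the renamed constructor, not part of the diff itself; the wrapper function and the 4-frame buffer size are hypothetical, while the conversions follow the ones used elsewhere in this commit:

    // Allocate a 4-frame untyped segment and wrap it for device-to-memory I/O.
    fn alloc_read_buffer() -> BioSegment {
        let segment: USegment = FrameAllocOptions::new()
            .alloc_segment(4)
            .unwrap()
            .into(); // an untyped `Segment<()>` converts into a `USegment`
        BioSegment::new_from_segment(segment, BioDirection::FromDevice)
    }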

View File

@@ -76,7 +76,7 @@ impl VirtQueue {
         }
         let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
-            // Currently, we use one DynUFrame to place the descriptors and available rings, one DynUFrame to place used rings
+            // Currently, we use one UFrame to place the descriptors and available rings, one UFrame to place used rings
             // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
             if size > 128 {
                 return Err(QueueError::InvalidArgs);

View File

@@ -54,7 +54,7 @@ use ostd::{
 ///
 /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo`
 /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and
-/// `DynUFrame`. The blanket implementations of `VmIo` also include pointer-like
+/// `UFrame`. The blanket implementations of `VmIo` also include pointer-like
 /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmar>`,
 /// and `Arc<IoMem>`.
 ///

View File

@@ -2,41 +2,41 @@
 // SPDX-License-Identifier: MPL-2.0

-//! Provides [`SegmentSlice`] for quick duplication and slicing over [`DynUSegment`].
+//! Provides [`SegmentSlice`] for quick duplication and slicing over [`USegment`].

 use alloc::sync::Arc;
 use core::ops::Range;

 use ostd::{
     mm::{
-        DynUFrame, DynUSegment, FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedMem,
-        VmIo, VmReader, VmWriter, PAGE_SIZE,
+        FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UFrame, USegment, UntypedMem, VmIo,
+        VmReader, VmWriter, PAGE_SIZE,
     },
     Error, Result,
 };

-/// A reference to a slice of a [`DynUSegment`].
+/// A reference to a slice of a [`USegment`].
 ///
 /// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference
-/// count. While cloning a [`DynUSegment`] will increment the reference count of
+/// count. While cloning a [`USegment`] will increment the reference count of
 /// many underlying pages.
 ///
 /// The downside is that the [`SegmentSlice`] requires heap allocation. Also,
-/// if any [`SegmentSlice`] of the original [`DynUSegment`] is alive, all pages in
-/// the original [`DynUSegment`], including the pages that are not referenced, will
+/// if any [`SegmentSlice`] of the original [`USegment`] is alive, all pages in
+/// the original [`USegment`], including the pages that are not referenced, will
 /// not be freed.
 #[derive(Debug, Clone)]
 pub struct SegmentSlice {
-    inner: Arc<DynUSegment>,
+    inner: Arc<USegment>,
     range: Range<usize>,
 }

 impl SegmentSlice {
-    /// Returns a part of the `DynUSegment`.
+    /// Returns a part of the `USegment`.
     ///
     /// # Panics
     ///
-    /// If `range` is not within the range of this `DynUSegment`,
+    /// If `range` is not within the range of this `USegment`,
     /// then the method panics.
     pub fn range(&self, range: Range<usize>) -> Self {
         let orig_range = &self.range;
@@ -124,8 +124,8 @@ impl VmIo for SegmentSlice {
     }
 }

-impl From<DynUSegment> for SegmentSlice {
-    fn from(segment: DynUSegment) -> Self {
+impl From<USegment> for SegmentSlice {
+    fn from(segment: USegment) -> Self {
         let range = 0..segment.size() / PAGE_SIZE;
         Self {
             inner: Arc::new(segment),
@@ -134,7 +134,7 @@ impl From<DynUSegment> for SegmentSlice {
     }
 }

-impl From<SegmentSlice> for DynUSegment {
+impl From<SegmentSlice> for USegment {
     fn from(slice: SegmentSlice) -> Self {
         let start = slice.range.start * PAGE_SIZE;
         let end = slice.range.end * PAGE_SIZE;
@@ -142,8 +142,8 @@ impl From<SegmentSlice> for DynUSegment {
     }
 }

-impl From<DynUFrame> for SegmentSlice {
-    fn from(frame: DynUFrame) -> Self {
-        SegmentSlice::from(DynUSegment::from(frame))
+impl From<UFrame> for SegmentSlice {
+    fn from(frame: UFrame) -> Self {
+        SegmentSlice::from(USegment::from(frame))
     }
 }
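The cheap-clone behavior documented above can be sketched as follows; the allocation and page counts are illustrative only:

    // One heap-allocated `Arc<USegment>` backs every clone and sub-slice.
    let segment: USegment = FrameAllocOptions::new().alloc_segment(8).unwrap().into();
    let slice = SegmentSlice::from(segment); // covers all 8 pages
    let sub = slice.range(2..4);             // same backing Arc, narrowed page range
    let sub2 = sub.clone();                  // bumps one refcount, not one per page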

View File

@@ -12,7 +12,7 @@ use aster_block::{
 };
 use hashbrown::HashMap;
 use lru::LruCache;
-use ostd::mm::DynUFrame;
+use ostd::mm::UFrame;
 pub(super) use ostd::mm::VmIo;

 use super::{
@@ -368,7 +368,7 @@ impl ExfatFS {
 }

 impl PageCacheBackend for ExfatFS {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid read size")
         }
@@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS {
         Ok(waiter)
     }
-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         if self.fs_size() < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "invalid write size")
         }

View File

@@ -13,7 +13,7 @@ use aster_block::{
     BLOCK_SIZE,
 };
 use aster_rights::Full;
-use ostd::mm::{DynUFrame, VmIo};
+use ostd::mm::{UFrame, VmIo};

 use super::{
     constants::*,
@@ -135,7 +135,7 @@ struct ExfatInodeInner {
 }

 impl PageCacheBackend for ExfatInode {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let inner = self.inner.read();
         if inner.size < idx * PAGE_SIZE {
             return_errno_with_message!(Errno::EINVAL, "Invalid read size")
@@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode {
         Ok(waiter)
     }
-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let inner = self.inner.read();
         let sector_size = inner.fs().sector_size();

View File

@@ -28,7 +28,7 @@ struct BlockGroupImpl {
 impl BlockGroup {
     /// Loads and constructs a block group.
     pub fn load(
-        group_descriptors_segment: &DynUSegment,
+        group_descriptors_segment: &USegment,
         idx: usize,
         block_device: &dyn BlockDevice,
         super_block: &SuperBlock,
@@ -318,7 +318,7 @@ impl Debug for BlockGroup {
 }

 impl PageCacheBackend for BlockGroupImpl {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment =
             BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice);
@@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl {
             .read_blocks_async(bid, bio_segment)
     }
-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = self.inode_table_bid + idx as Ext2Bid;
         let bio_segment =
             BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice);

View File

@@ -23,7 +23,7 @@ pub struct Ext2 {
     blocks_per_group: Ext2Bid,
     inode_size: usize,
     block_size: usize,
-    group_descriptors_segment: DynUSegment,
+    group_descriptors_segment: USegment,
     self_ref: Weak<Self>,
 }
@@ -63,7 +63,7 @@ impl Ext2 {
         // Load the block groups information
         let load_block_groups = |fs: Weak<Ext2>,
                                  block_device: &dyn BlockDevice,
-                                 group_descriptors_segment: &DynUSegment|
+                                 group_descriptors_segment: &USegment|
          -> Result<Vec<BlockGroup>> {
             let block_groups_count = super_block.block_groups_count() as usize;
             let mut block_groups = Vec::with_capacity(block_groups_count);

View File

@@ -1733,7 +1733,7 @@ impl InodeImpl {
         writer: &mut VmWriter,
     ) -> Result<BioWaiter>;
     pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>;
-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter>;
     pub fn write_blocks_async(
         &self,
         bid: Ext2Bid,
@@ -1741,7 +1741,7 @@ impl InodeImpl {
         reader: &mut VmReader,
     ) -> Result<BioWaiter>;
     pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>;
-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter>;
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter>;
 }

 /// Manages the inode blocks and block I/O operations.
@@ -1789,7 +1789,7 @@ impl InodeBlockManager {
         }
     }
-    pub fn read_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
+    pub fn read_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter> {
         let mut bio_waiter = BioWaiter::new();

         for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@@ -1834,7 +1834,7 @@ impl InodeBlockManager {
         }
     }
-    pub fn write_block_async(&self, bid: Ext2Bid, frame: &DynUFrame) -> Result<BioWaiter> {
+    pub fn write_block_async(&self, bid: Ext2Bid, frame: &UFrame) -> Result<BioWaiter> {
         let mut bio_waiter = BioWaiter::new();

         for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@@ -1858,12 +1858,12 @@ impl InodeBlockManager {
 }

 impl PageCacheBackend for InodeBlockManager {
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
         self.read_block_async(bid, frame)
     }
-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         let bid = idx as Ext2Bid;
         self.write_block_async(bid, frame)
     }

View File

@@ -13,7 +13,7 @@ pub(super) use aster_block::{
 };
 pub(super) use aster_rights::Full;
 pub(super) use ostd::{
-    mm::{DynUFrame, DynUSegment, Frame, FrameAllocOptions, Segment, VmIo},
+    mm::{Frame, FrameAllocOptions, Segment, UFrame, USegment, VmIo},
     sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
 };
 pub(super) use static_assertions::const_assert;

View File

@@ -11,7 +11,7 @@ use aster_rights::Full;
 use aster_util::slot_vec::SlotVec;
 use hashbrown::HashMap;
 use ostd::{
-    mm::{DynUFrame, UntypedMem, VmIo},
+    mm::{UFrame, UntypedMem, VmIo},
     sync::{PreemptDisabled, RwLockWriteGuard},
 };
@@ -484,7 +484,7 @@ impl RamInode {
 }

 impl PageCacheBackend for RamInode {
-    fn read_page_async(&self, _idx: usize, frame: &DynUFrame) -> Result<BioWaiter> {
+    fn read_page_async(&self, _idx: usize, frame: &UFrame) -> Result<BioWaiter> {
         // Initially, any block/page in a RamFs inode contains all zeros
         frame
             .writer()
@@ -494,7 +494,7 @@ impl PageCacheBackend for RamInode {
         Ok(BioWaiter::new())
     }
-    fn write_page_async(&self, _idx: usize, _frame: &DynUFrame) -> Result<BioWaiter> {
+    fn write_page_async(&self, _idx: usize, _frame: &UFrame) -> Result<BioWaiter> {
         // do nothing
         Ok(BioWaiter::new())
     }

View File

@@ -14,7 +14,7 @@ use aster_rights::Full;
 use lru::LruCache;
 use ostd::{
     impl_untyped_frame_meta_for,
-    mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem, VmIo},
+    mm::{Frame, FrameAllocOptions, UFrame, UntypedMem, VmIo},
 };

 use crate::{
@@ -388,7 +388,7 @@ impl PageCacheManager {
         Ok(())
     }
-    fn ondemand_readahead(&self, idx: usize) -> Result<DynUFrame> {
+    fn ondemand_readahead(&self, idx: usize) -> Result<UFrame> {
         let mut pages = self.pages.lock();
         let mut ra_state = self.ra_state.lock();
         let backend = self.backend();
@@ -445,7 +445,7 @@ impl Debug for PageCacheManager {
 }

 impl Pager for PageCacheManager {
-    fn commit_page(&self, idx: usize) -> Result<DynUFrame> {
+    fn commit_page(&self, idx: usize) -> Result<UFrame> {
         self.ondemand_readahead(idx)
     }
@@ -476,7 +476,7 @@ impl Pager for PageCacheManager {
         Ok(())
     }
-    fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame> {
+    fn commit_overwrite(&self, idx: usize) -> Result<UFrame> {
         if let Some(page) = self.pages.lock().get(&idx) {
             return Ok(page.clone().into());
         }
@@ -573,16 +573,16 @@ impl AtomicPageState {
 /// This trait represents the backend for the page cache.
 pub trait PageCacheBackend: Sync + Send {
     /// Reads a page from the backend asynchronously.
-    fn read_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
+    fn read_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter>;
     /// Writes a page to the backend asynchronously.
-    fn write_page_async(&self, idx: usize, frame: &DynUFrame) -> Result<BioWaiter>;
+    fn write_page_async(&self, idx: usize, frame: &UFrame) -> Result<BioWaiter>;
     /// Returns the number of pages in the backend.
     fn npages(&self) -> usize;
 }

 impl dyn PageCacheBackend {
     /// Reads a page from the backend synchronously.
-    fn read_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
+    fn read_page(&self, idx: usize, frame: &UFrame) -> Result<()> {
         let waiter = self.read_page_async(idx, frame)?;
         match waiter.wait() {
             Some(BioStatus::Complete) => Ok(()),
@@ -590,7 +590,7 @@ impl dyn PageCacheBackend {
         }
     }
     /// Writes a page to the backend synchronously.
-    fn write_page(&self, idx: usize, frame: &DynUFrame) -> Result<()> {
+    fn write_page(&self, idx: usize, frame: &UFrame) -> Result<()> {
         let waiter = self.write_page_async(idx, frame)?;
         match waiter.wait() {
             Some(BioStatus::Complete) => Ok(()),
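A sketch of what implementing the renamed trait looks like, mirroring the `RamInode` backend earlier in this commit. `ZeroBackend` is hypothetical, and the zero-fill call is assumed from the truncated `RamInode` snippet above:

    struct ZeroBackend {
        npages: usize,
    }

    impl PageCacheBackend for ZeroBackend {
        fn read_page_async(&self, _idx: usize, frame: &UFrame) -> Result<BioWaiter> {
            // "Read" by zero-filling the frame; no real I/O is queued,
            // so an empty waiter completes immediately.
            frame.writer().fill(0u8);
            Ok(BioWaiter::new())
        }
        fn write_page_async(&self, _idx: usize, _frame: &UFrame) -> Result<BioWaiter> {
            Ok(BioWaiter::new()) // nothing to persist
        }
        fn npages(&self) -> usize {
            self.npages
        }
    }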

View File

@@ -21,7 +21,7 @@ use aster_rights::Rights;
 use aster_time::{read_monotonic_time, Instant};
 use aster_util::coeff::Coeff;
 use ostd::{
-    mm::{DynUFrame, VmIo, PAGE_SIZE},
+    mm::{UFrame, VmIo, PAGE_SIZE},
     sync::SpinLock,
     Pod,
 };
@@ -199,9 +199,9 @@ struct Vdso {
     data: SpinLock<VdsoData>,
     /// The VMO of the entire VDSO, including the library text and the VDSO data.
     vmo: Arc<Vmo>,
-    /// The `DynUFrame` that contains the VDSO data. This frame is contained in and
+    /// The `UFrame` that contains the VDSO data. This frame is contained in and
     /// will not be removed from the VDSO VMO.
-    data_frame: DynUFrame,
+    data_frame: UFrame,
 }

 /// A `SpinLock` for the `seq` field in `VdsoData`.

View File

@@ -1,13 +1,13 @@
 // SPDX-License-Identifier: MPL-2.0

-use ostd::mm::{DynUFrame, Frame, FrameAllocOptions, UntypedMem};
+use ostd::mm::{Frame, FrameAllocOptions, UFrame, UntypedMem};

 use crate::prelude::*;

 /// Creates a new `Frame<()>` and initializes it with the contents of the `src`.
 ///
 /// Note that it only duplicates the contents not the metadata.
-pub fn duplicate_frame(src: &DynUFrame) -> Result<Frame<()>> {
+pub fn duplicate_frame(src: &UFrame) -> Result<Frame<()>> {
     let new_frame = FrameAllocOptions::new().zeroed(false).alloc_frame()?;
     new_frame.writer().write(&mut src.reader());
     Ok(new_frame)
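A short usage sketch: duplicating a committed page before a private write, copy-on-write style. The conversion back into a `UFrame` follows the `Frame<()>`-to-`UFrame` conversions used elsewhere in this commit:

    // `src: &UFrame` is any untyped frame handle; only contents are copied.
    let copy: Frame<()> = duplicate_frame(src)?;
    let copy_as_uframe: UFrame = copy.into();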

View File

@@ -8,8 +8,8 @@ use core::{
 use align_ext::AlignExt;
 use ostd::mm::{
-    tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, DynUFrame, FrameAllocOptions, PageFlags,
-    PageProperty, VmSpace,
+    tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
+    UFrame, VmSpace,
 };

 use super::interval_set::Interval;
@@ -216,7 +216,7 @@ impl VmMapping {
         Ok(())
     }
-    fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(DynUFrame, bool)> {
+    fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UFrame, bool)> {
         let mut is_readonly = false;
         let Some(vmo) = &self.vmo else {
             return Ok((FrameAllocOptions::new().alloc_frame()?.into(), is_readonly));
@@ -264,7 +264,7 @@ impl VmMapping {
         let vm_perms = self.perms - VmPerms::WRITE;
         let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;
-        let operate = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
+        let operate = move |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
             if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
                 // We regard all the surrounding pages as accessed, no matter
                 // if it is really so. Then the hardware won't bother to update
@@ -432,7 +432,7 @@ impl MappedVmo {
     ///
     /// If the VMO has not committed a frame at this index, it will commit
     /// one first and return it.
-    fn get_committed_frame(&self, page_offset: usize) -> Result<DynUFrame> {
+    fn get_committed_frame(&self, page_offset: usize) -> Result<UFrame> {
         debug_assert!(page_offset < self.range.len());
         debug_assert!(page_offset % PAGE_SIZE == 0);
         self.vmo.commit_page(self.range.start + page_offset)
@@ -444,7 +444,7 @@ impl MappedVmo {
     /// perform other operations.
     fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         debug_assert!(range.start < self.range.len());
         debug_assert!(range.end <= self.range.len());

View File

@@ -3,14 +3,14 @@
 use core::ops::Range;

 use aster_rights::{Rights, TRights};
-use ostd::mm::{DynUFrame, VmIo};
+use ostd::mm::{UFrame, VmIo};

 use super::{CommitFlags, Vmo, VmoRightsOp};
 use crate::prelude::*;

 impl Vmo<Rights> {
     /// Commits a page at specific offset
-    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<UFrame> {
         self.check_rights(Rights::WRITE)?;
         self.0.commit_page(offset)
     }
@@ -39,7 +39,7 @@ impl Vmo<Rights> {
     /// perform other operations.
     pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         self.check_rights(Rights::WRITE)?;
         self.0
@@ -112,7 +112,7 @@ impl Vmo<Rights> {
     /// # Access rights
     ///
     /// The method requires the Write right.
-    pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
+    pub fn replace(&self, page: UFrame, page_idx: usize) -> Result<()> {
         self.check_rights(Rights::WRITE)?;
         self.0.replace(page, page_idx)
     }

View File

@@ -11,7 +11,7 @@ use align_ext::AlignExt;
 use aster_rights::Rights;
 use ostd::{
     collections::xarray::{CursorMut, XArray},
-    mm::{DynUFrame, FrameAllocOptions, UntypedMem, VmReader, VmWriter},
+    mm::{FrameAllocOptions, UFrame, UntypedMem, VmReader, VmWriter},
 };

 use crate::prelude::*;
@@ -66,8 +66,8 @@ pub use pager::Pager;
 /// # Implementation
 ///
 /// `Vmo` provides high-level APIs for address space management by wrapping
-/// around its low-level counterpart [`ostd::mm::DynUFrame`].
-/// Compared with `DynUFrame`,
+/// around its low-level counterpart [`ostd::mm::UFrame`].
+/// Compared with `UFrame`,
 /// `Vmo` is easier to use (by offering more powerful APIs) and
 /// harder to misuse (thanks to its nature of being capability).
 #[derive(Debug)]
@@ -125,12 +125,12 @@ bitflags! {
     }
 }

-/// `Pages` is the struct that manages the `DynUFrame`s stored in `Vmo_`.
+/// `Pages` is the struct that manages the `UFrame`s stored in `Vmo_`.
 pub(super) enum Pages {
     /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
-    Nonresizable(Mutex<XArray<DynUFrame>>, usize),
+    Nonresizable(Mutex<XArray<UFrame>>, usize),
     /// `Pages` that can be resized and have a variable size.
-    Resizable(Mutex<(XArray<DynUFrame>, usize)>),
+    Resizable(Mutex<(XArray<UFrame>, usize)>),
 }

 impl Clone for Pages {
@@ -149,7 +149,7 @@ impl Clone for Pages {
 impl Pages {
     fn with<R, F>(&self, func: F) -> R
     where
-        F: FnOnce(&mut XArray<DynUFrame>, usize) -> R,
+        F: FnOnce(&mut XArray<UFrame>, usize) -> R,
     {
         match self {
             Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
@@ -201,16 +201,16 @@ impl CommitFlags {
 }

 impl Vmo_ {
-    /// Prepares a new `DynUFrame` for the target index in pages, returns this new frame.
-    fn prepare_page(&self, page_idx: usize) -> Result<DynUFrame> {
+    /// Prepares a new `UFrame` for the target index in pages, returns this new frame.
+    fn prepare_page(&self, page_idx: usize) -> Result<UFrame> {
         match &self.pager {
             None => Ok(FrameAllocOptions::new().alloc_frame()?.into()),
             Some(pager) => pager.commit_page(page_idx),
         }
     }

-    /// Prepares a new `DynUFrame` for the target index in the VMO, returns this new frame.
-    fn prepare_overwrite(&self, page_idx: usize) -> Result<DynUFrame> {
+    /// Prepares a new `UFrame` for the target index in the VMO, returns this new frame.
+    fn prepare_overwrite(&self, page_idx: usize) -> Result<UFrame> {
         if let Some(pager) = &self.pager {
             pager.commit_overwrite(page_idx)
         } else {
@@ -220,9 +220,9 @@ impl Vmo_ {
     fn commit_with_cursor(
         &self,
-        cursor: &mut CursorMut<'_, DynUFrame>,
+        cursor: &mut CursorMut<'_, UFrame>,
         commit_flags: CommitFlags,
-    ) -> Result<DynUFrame> {
+    ) -> Result<UFrame> {
         let new_page = {
             if let Some(committed_page) = cursor.load() {
                 // Fast path: return the page directly.
@@ -241,7 +241,7 @@ impl Vmo_ {
     /// Commits the page corresponding to the target offset in the VMO and return that page.
     /// If the current offset has already been committed, the page will be returned directly.
-    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<UFrame> {
         let page_idx = offset / PAGE_SIZE;
         self.pages.with(|pages, size| {
             if offset >= size {
@@ -279,7 +279,7 @@ impl Vmo_ {
         commit_flags: CommitFlags,
     ) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         self.pages.with(|pages, size| {
             if range.end > size {
@@ -315,7 +315,7 @@ impl Vmo_ {
         let read_range = offset..(offset + read_len);
         let mut read_offset = offset % PAGE_SIZE;
-        let read = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
+        let read = move |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
             let frame = commit_fn()?;
             frame.reader().skip(read_offset).read_fallible(writer)?;
             read_offset = 0;
@@ -331,7 +331,7 @@ impl Vmo_ {
         let write_range = offset..(offset + write_len);
         let mut write_offset = offset % PAGE_SIZE;
-        let mut write = move |commit_fn: &mut dyn FnMut() -> Result<DynUFrame>| {
+        let mut write = move |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
             let frame = commit_fn()?;
             frame.writer().skip(write_offset).write_fallible(reader)?;
             write_offset = 0;
@@ -401,7 +401,7 @@ impl Vmo_ {
         Ok(())
     }
-    fn decommit_pages(&self, pages: &mut XArray<DynUFrame>, range: Range<usize>) -> Result<()> {
+    fn decommit_pages(&self, pages: &mut XArray<UFrame>, range: Range<usize>) -> Result<()> {
         let page_idx_range = get_page_idx_range(&range);
         let mut cursor = pages.cursor_mut(page_idx_range.start as u64);
         for page_idx in page_idx_range {
@@ -426,7 +426,7 @@ impl Vmo_ {
         self.flags
     }
-    fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
+    fn replace(&self, page: UFrame, page_idx: usize) -> Result<()> {
         self.pages.with(|pages, size| {
             if page_idx >= size / PAGE_SIZE {
                 return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo");
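The `operate_on_range` plumbing above is easiest to read as a commit-callback pattern: the caller supplies a closure, and the VMO hands it a `commit_fn` that commits (or finds) each page in turn. A hedged sketch from within `crate::vm`, with a hypothetical zero-fill operation:

    // Zero the first page of `vmo`, committing it on demand.
    let zero = |commit_fn: &mut dyn FnMut() -> Result<UFrame>| {
        let frame = commit_fn()?;   // commits the page if not yet committed
        frame.writer().fill(0u8);   // then operates on the committed frame
        Ok(())
    };
    vmo.operate_on_range(&(0..PAGE_SIZE), zero)?;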

View File

@@ -8,7 +8,7 @@ use align_ext::AlignExt;
 use aster_rights::{Rights, TRightSet, TRights};
 use ostd::{
     collections::xarray::XArray,
-    mm::{DynUFrame, DynUSegment, FrameAllocOptions},
+    mm::{FrameAllocOptions, UFrame, USegment},
 };

 use super::{Pager, Pages, Vmo, VmoFlags};
@@ -137,11 +137,11 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
     })
 }

-fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<DynUFrame>> {
+fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<UFrame>> {
     if flags.contains(VmoFlags::CONTIGUOUS) {
         // if the vmo is continuous, we need to allocate frames for the vmo
         let frames_num = size / PAGE_SIZE;
-        let segment: DynUSegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into();
+        let segment: USegment = FrameAllocOptions::new().alloc_segment(frames_num)?.into();
         let mut committed_pages = XArray::new();
         let mut cursor = committed_pages.cursor_mut(0);
         for frame in segment {

View File

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MPL-2.0

-use ostd::mm::DynUFrame;
+use ostd::mm::UFrame;

 use crate::prelude::*;
@@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
     /// whatever frame that may or may not be the same as the last time.
     ///
     /// It is up to the pager to decide the range of valid indices.
-    fn commit_page(&self, idx: usize) -> Result<DynUFrame>;
+    fn commit_page(&self, idx: usize) -> Result<UFrame>;

     /// Notify the pager that the frame at a specified index has been updated.
     ///
@@ -54,5 +54,5 @@ pub trait Pager: Send + Sync {
     /// Ask the pager to provide a frame at a specified index.
     /// Notify the pager that the frame will be fully overwritten soon, so pager can
     /// choose not to initialize it.
-    fn commit_overwrite(&self, idx: usize) -> Result<DynUFrame>;
+    fn commit_overwrite(&self, idx: usize) -> Result<UFrame>;
 }
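For orientation, the smallest conceivable pager under the new signatures would hand out fresh frames and skip initialization on overwrite. `NullPager` is hypothetical, and the trait's notification methods (elided from this hunk) are omitted here, so this is a sketch rather than a compiling impl:

    struct NullPager;

    impl Pager for NullPager {
        fn commit_page(&self, _idx: usize) -> Result<UFrame> {
            Ok(FrameAllocOptions::new().alloc_frame()?.into())
        }
        fn commit_overwrite(&self, idx: usize) -> Result<UFrame> {
            // The caller promises to overwrite the frame, so a pager may
            // return an uninitialized frame; we simply reuse commit_page.
            self.commit_page(idx)
        }
        // ...the notification methods elided from this hunk would be no-ops.
    }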

View File

@@ -4,14 +4,14 @@ use core::ops::Range;

 use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
 use aster_rights_proc::require;
-use ostd::mm::{DynUFrame, VmIo};
+use ostd::mm::{UFrame, VmIo};

 use super::{CommitFlags, Vmo, VmoRightsOp};
 use crate::prelude::*;

 impl<R: TRights> Vmo<TRightSet<R>> {
     /// Commits a page at specific offset.
-    pub fn commit_page(&self, offset: usize) -> Result<DynUFrame> {
+    pub fn commit_page(&self, offset: usize) -> Result<UFrame> {
         self.check_rights(Rights::WRITE)?;
         self.0.commit_page(offset)
     }
@@ -41,7 +41,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
     #[require(R > Write)]
     pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
     where
-        F: FnMut(&mut dyn FnMut() -> Result<DynUFrame>) -> Result<()>,
+        F: FnMut(&mut dyn FnMut() -> Result<UFrame>) -> Result<()>,
     {
         self.0
             .operate_on_range(range, operate, CommitFlags::empty())
@@ -114,7 +114,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
     ///
     /// The method requires the Write right.
     #[require(R > Write)]
-    pub fn replace(&self, page: DynUFrame, page_idx: usize) -> Result<()> {
+    pub fn replace(&self, page: UFrame, page_idx: usize) -> Result<()> {
         self.0.replace(page, page_idx)
     }

View File

@@ -40,7 +40,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
             .alloc_segment(nbytes / PAGE_SIZE)
             .unwrap();
         // Physical memory pages can be only accessed
-        // via the `DynUFrame` or `DynUSegment` abstraction.
+        // via the `UFrame` or `USegment` abstraction.
         segment.write_bytes(0, program).unwrap();
         segment
     };

View File

@@ -35,7 +35,7 @@ impl IntRemappingTable {
         Some(self.handles.get(id).unwrap().clone())
     }

-    /// Creates an Interrupt Remapping Table with one DynUFrame (default).
+    /// Creates an Interrupt Remapping Table with one `Segment` (default).
     pub(super) fn new() -> Self {
         const DEFAULT_PAGES: usize = 1;
         let segment = FrameAllocOptions::new()

View File

@@ -13,7 +13,7 @@ use crate::{
         io::VmIoOnce,
         kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
         page_prop::CachePolicy,
-        DynUSegment, HasPaddr, Infallible, Paddr, PodOnce, UntypedMem, VmIo, VmReader, VmWriter,
+        HasPaddr, Infallible, Paddr, PodOnce, USegment, UntypedMem, VmIo, VmReader, VmWriter,
         PAGE_SIZE,
     },
     prelude::*,
@@ -39,7 +39,7 @@ pub struct DmaCoherent {
 #[derive(Debug)]
 struct DmaCoherentInner {
-    segment: DynUSegment,
+    segment: USegment,
     start_daddr: Daddr,
     is_cache_coherent: bool,
 }
@@ -54,10 +54,7 @@ impl DmaCoherent {
     ///
     /// The method fails if any part of the given `segment`
     /// already belongs to a DMA mapping.
-    pub fn map(
-        segment: DynUSegment,
-        is_cache_coherent: bool,
-    ) -> core::result::Result<Self, DmaError> {
+    pub fn map(segment: USegment, is_cache_coherent: bool) -> core::result::Result<Self, DmaError> {
         let frame_count = segment.size() / PAGE_SIZE;
         let start_paddr = segment.start_paddr();
         if !check_and_insert_dma_mapping(start_paddr, frame_count) {
@@ -124,7 +121,7 @@ impl HasDaddr for DmaCoherent {
 }

 impl Deref for DmaCoherent {
-    type Target = DynUSegment;
+    type Target = USegment;
     fn deref(&self) -> &Self::Target {
         &self.inner.segment
     }
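A usage sketch of the tightened `map` signature; the segment size and error handling are illustrative:

    let segment: USegment = FrameAllocOptions::new().alloc_segment(2).unwrap().into();
    let dma = DmaCoherent::map(segment, /* is_cache_coherent */ true)
        .expect("no part of the segment may already be DMA-mapped");
    // `Deref<Target = USegment>` keeps the segment API available on the handle:
    let paddr = dma.start_paddr();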

View File

@@ -11,7 +11,7 @@ use crate::{
     error::Error,
     mm::{
         dma::{dma_type, Daddr, DmaType},
-        DynUSegment, HasPaddr, Infallible, Paddr, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
+        HasPaddr, Infallible, Paddr, USegment, UntypedMem, VmIo, VmReader, VmWriter, PAGE_SIZE,
     },
 };
@@ -34,7 +34,7 @@ pub struct DmaStream {
 #[derive(Debug)]
 struct DmaStreamInner {
-    segment: DynUSegment,
+    segment: USegment,
     start_daddr: Daddr,
     /// TODO: remove this field when on x86.
     #[allow(unused)]
@@ -55,11 +55,11 @@ pub enum DmaDirection {
 }

 impl DmaStream {
-    /// Establishes DMA stream mapping for a given [`DynUSegment`].
+    /// Establishes DMA stream mapping for a given [`USegment`].
     ///
     /// The method fails if the segment already belongs to a DMA mapping.
     pub fn map(
-        segment: DynUSegment,
+        segment: USegment,
         direction: DmaDirection,
         is_cache_coherent: bool,
     ) -> Result<Self, DmaError> {
@@ -107,13 +107,13 @@ impl DmaStream {
         })
     }

-    /// Gets the underlying [`DynUSegment`].
+    /// Gets the underlying [`USegment`].
     ///
     /// Usually, the CPU side should not access the memory
     /// after the DMA mapping is established because
     /// there is a chance that the device is updating
     /// the memory. Do this at your own risk.
-    pub fn segment(&self) -> &DynUSegment {
+    pub fn segment(&self) -> &USegment {
         &self.inner.segment
     }
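And the streaming counterpart, which additionally fixes a transfer direction; the one-frame buffer and `FromDevice` direction are arbitrary choices for illustration:

    let segment: USegment = FrameAllocOptions::new().alloc_segment(1).unwrap().into();
    let stream = DmaStream::map(segment, DmaDirection::FromDevice, false)
        .expect("segment must not already be DMA-mapped");
    // ...the device fills the buffer...
    let inner: &USegment = stream.segment(); // CPU-side access; see the caveat above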

View File

@@ -1,16 +1,13 @@
 // SPDX-License-Identifier: MPL-2.0

-//! The physical page memory allocator.
-//!
-//! TODO: Decouple it with the frame allocator in [`crate::mm::frame::options`] by
-//! allocating pages rather untyped memory from this module.
+//! The physical memory allocator.

 use align_ext::AlignExt;
 use buddy_system_allocator::FrameAllocator;
 use log::info;
 use spin::Once;

-use super::{meta::FrameMeta, segment::Segment, Frame};
+use super::{meta::AnyFrameMeta, segment::Segment, Frame};
 use crate::{
     boot::memory_region::MemoryRegionType,
     error::Error,
@@ -54,8 +51,8 @@ impl FrameAllocOptions {
     }

     /// Allocates a single frame with additional metadata.
-    pub fn alloc_frame_with<M: FrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
-        let frame = PAGE_ALLOCATOR
+    pub fn alloc_frame_with<M: AnyFrameMeta>(&self, metadata: M) -> Result<Frame<M>> {
+        let frame = FRAME_ALLOCATOR
             .get()
             .unwrap()
             .disable_irq()
@@ -85,7 +82,7 @@ impl FrameAllocOptions {
     ///
     /// The returned [`Segment`] contains at least one frame. The method returns
     /// an error if the number of frames is zero.
-    pub fn alloc_segment_with<M: FrameMeta, F>(
+    pub fn alloc_segment_with<M: AnyFrameMeta, F>(
         &self,
         nframes: usize,
         metadata_fn: F,
@@ -96,7 +93,7 @@ impl FrameAllocOptions {
         if nframes == 0 {
             return Err(Error::InvalidArgs);
         }
-        let segment = PAGE_ALLOCATOR
+        let segment = FRAME_ALLOCATOR
             .get()
             .unwrap()
             .disable_irq()
@@ -168,6 +165,8 @@ impl CountingFrameAllocator {
         }
     }

+    // TODO: this method should be marked unsafe as invalid arguments will mess
+    // up the underlying allocator.
     pub fn dealloc(&mut self, start_frame: usize, count: usize) {
         self.allocator.dealloc(start_frame, count);
         self.allocated -= count * PAGE_SIZE;
@@ -182,7 +181,7 @@ impl CountingFrameAllocator {
     }
 }

-pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();
+pub(in crate::mm) static FRAME_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>> = Once::new();

 pub(crate) fn init() {
     let regions = crate::boot::memory_regions();
@@ -208,5 +207,5 @@ pub(crate) fn init() {
         }
     }
     let counting_allocator = CountingFrameAllocator::new(allocator, total);
-    PAGE_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator));
+    FRAME_ALLOCATOR.call_once(|| SpinLock::new(counting_allocator));
 }
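The renamed allocator is reached through the same two user-facing paths; a brief sketch (frame counts arbitrary, and `?` assumes a `Result`-returning context):

    let frame = FrameAllocOptions::new().alloc_frame()?; // one frame
    let segment = FrameAllocOptions::new()
        .zeroed(false)       // skip zero-filling, as `duplicate_frame` does above
        .alloc_segment(16)?; // 16 physically contiguous frames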

View File

@ -1,15 +1,18 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
//! Metadata management of pages. //! Metadata management of frames.
//! //!
//! You can picture a globally shared, static, gigantic array of metadata initialized for each page. //! You can picture a globally shared, static, gigantic array of metadata
//! An entry in the array is called a `MetaSlot`, which contains the metadata of a page. There would //! initialized for each frame. An entry in the array is called a [`MetaSlot`],
//! be a dedicated small "heap" space in each slot for dynamic metadata. You can store anything as the //! which contains the metadata of a frame. There would be a dedicated small
//! metadata of a page as long as it's [`Sync`]. //! "heap" space in each slot for dynamic metadata. You can store anything as
//! the metadata of a frame as long as it's [`Sync`].
//! //!
//! In the implementation level, the slots are placed in the metadata pages mapped to a certain virtual //! # Implementation
//! address. It is faster, simpler, safer and more versatile compared with an actual static array //!
//! implementation. //! The slots are placed in the metadata pages mapped to a certain virtual
//! address in the kernel space. So finding the metadata of a frame often
//! comes with no costs since the translation is a simple arithmetic operation.
pub(crate) mod mapping { pub(crate) mod mapping {
//! The metadata of each physical page is linear mapped to fixed virtual addresses //! The metadata of each physical page is linear mapped to fixed virtual addresses
@ -20,15 +23,15 @@ pub(crate) mod mapping {
use super::MetaSlot; use super::MetaSlot;
use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE}; use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
/// Converts a physical address of a base page to the virtual address of the metadata slot. /// Converts a physical address of a base frame to the virtual address of the metadata slot.
pub(crate) const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr { pub(crate) const fn frame_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
let base = FRAME_METADATA_RANGE.start; let base = FRAME_METADATA_RANGE.start;
let offset = paddr / PAGE_SIZE; let offset = paddr / PAGE_SIZE;
base + offset * size_of::<MetaSlot>() base + offset * size_of::<MetaSlot>()
} }
/// Converts a virtual address of the metadata slot to the physical address of the page. /// Converts a virtual address of the metadata slot to the physical address of the frame.
pub(crate) const fn meta_to_page<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr { pub(crate) const fn meta_to_frame<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr {
let base = FRAME_METADATA_RANGE.start; let base = FRAME_METADATA_RANGE.start;
let offset = (vaddr - base) / size_of::<MetaSlot>(); let offset = (vaddr - base) / size_of::<MetaSlot>();
offset * PAGE_SIZE offset * PAGE_SIZE
@ -58,28 +61,28 @@ use crate::{
panic::abort, panic::abort,
}; };
/// The maximum number of bytes of the metadata of a page. /// The maximum number of bytes of the metadata of a frame.
pub const PAGE_METADATA_MAX_SIZE: usize = pub const FRAME_METADATA_MAX_SIZE: usize =
META_SLOT_SIZE - size_of::<bool>() - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>(); META_SLOT_SIZE - size_of::<bool>() - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();
/// The maximum alignment in bytes of the metadata of a page. /// The maximum alignment in bytes of the metadata of a frame.
pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>(); pub const FRAME_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>();
const META_SLOT_SIZE: usize = 64; const META_SLOT_SIZE: usize = 64;
#[repr(C)] #[repr(C)]
pub(in crate::mm) struct MetaSlot { pub(in crate::mm) struct MetaSlot {
/// The metadata of the page. /// The metadata of a frame.
/// ///
/// It is placed at the beginning of a slot because: /// It is placed at the beginning of a slot because:
/// - the implementation can simply cast a `*const MetaSlot` /// - the implementation can simply cast a `*const MetaSlot`
/// to a `*const FrameMeta` for manipulation; /// to a `*const AnyFrameMeta` for manipulation;
/// - if the metadata need special alignment, we can provide /// - if the metadata need special alignment, we can provide
/// at most `PAGE_METADATA_ALIGN` bytes of alignment; /// at most `PAGE_METADATA_ALIGN` bytes of alignment;
/// - the subsequent fields can utilize the padding of the /// - the subsequent fields can utilize the padding of the
/// reference count to save space. /// reference count to save space.
/// ///
/// Don't access this field by a reference to the slot. /// Don't access this field with a reference to the slot.
_storage: UnsafeCell<[u8; PAGE_METADATA_MAX_SIZE]>, _storage: UnsafeCell<[u8; FRAME_METADATA_MAX_SIZE]>,
/// The reference count of the page. /// The reference count of the page.
/// ///
/// Specifically, the reference count has the following meaning: /// Specifically, the reference count has the following meaning:
@ -94,7 +97,7 @@ pub(in crate::mm) struct MetaSlot {
/// [`Frame::from_unused`]: super::Frame::from_unused /// [`Frame::from_unused`]: super::Frame::from_unused
// //
// Other than this field the fields should be `MaybeUninit`. // Other than this field the fields should be `MaybeUninit`.
// See initialization in `alloc_meta_pages`. // See initialization in `alloc_meta_frames`.
pub(super) ref_count: AtomicU32, pub(super) ref_count: AtomicU32,
/// The virtual table that indicates the type of the metadata. /// The virtual table that indicates the type of the metadata.
pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>, pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
@ -103,46 +106,46 @@ pub(in crate::mm) struct MetaSlot {
pub(super) const REF_COUNT_UNUSED: u32 = u32::MAX; pub(super) const REF_COUNT_UNUSED: u32 = u32::MAX;
const REF_COUNT_MAX: u32 = i32::MAX as u32; const REF_COUNT_MAX: u32 = i32::MAX as u32;
type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn FrameMeta>; type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;
const_assert_eq!(PAGE_SIZE % META_SLOT_SIZE, 0); const_assert_eq!(PAGE_SIZE % META_SLOT_SIZE, 0);
const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE); const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE);
/// All page metadata types must implement this trait. /// All frame metadata types must implement this trait.
/// ///
/// If a page type needs specific drop behavior, it should specify /// If a frame type needs specific drop behavior, it should specify
/// when implementing this trait. When we drop the last handle to /// when implementing this trait. When we drop the last handle to
/// this page, the `on_drop` method will be called. The `on_drop` /// this frame, the `on_drop` method will be called. The `on_drop`
/// method is called with the physical address of the page. /// method is called with the physical address of the frame.
/// ///
/// # Safety /// # Safety
/// ///
/// The implemented structure must have a size less than or equal to /// The implemented structure must have a size less than or equal to
/// [`PAGE_METADATA_MAX_SIZE`] and an alignment less than or equal to /// [`FRAME_METADATA_MAX_SIZE`] and an alignment less than or equal to
/// [`PAGE_METADATA_MAX_ALIGN`]. /// [`FRAME_METADATA_MAX_ALIGN`].
/// ///
/// The implementer of the `on_drop` method should ensure that the frame is /// The implementer of the `on_drop` method should ensure that the frame is
/// safe to be read. /// safe to be read.
pub unsafe trait FrameMeta: Any + Send + Sync + Debug + 'static { pub unsafe trait AnyFrameMeta: Any + Send + Sync + Debug + 'static {
/// Called when the last handle to the page is dropped. /// Called when the last handle to the frame is dropped.
fn on_drop(&mut self, reader: &mut VmReader<Infallible>) { fn on_drop(&mut self, reader: &mut VmReader<Infallible>) {
let _ = reader; let _ = reader;
} }
/// Whether the metadata's associated frame is untyped. /// Whether the metadata's associated frame is untyped.
/// ///
/// If a type implements [`UFrameMeta`], this should be `true`. /// If a type implements [`AnyUFrameMeta`], this should be `true`.
/// Otherwise, it should be `false`. /// Otherwise, it should be `false`.
/// ///
/// [`UFrameMeta`]: super::untyped::UFrameMeta /// [`AnyUFrameMeta`]: super::untyped::AnyUFrameMeta
fn is_untyped(&self) -> bool { fn is_untyped(&self) -> bool {
false false
} }
} }
/// Makes a structure usable as a page metadata. /// Makes a structure usable as a frame metadata.
/// ///
/// Directly implementing [`FrameMeta`] is not safe since the size and alignment /// Directly implementing [`AnyFrameMeta`] is not safe since the size and alignment
/// must be checked. This macro provides a safe way to implement the trait with /// must be checked. This macro provides a safe way to implement the trait with
/// compile-time checks. /// compile-time checks.
#[macro_export] #[macro_export]
@ -150,21 +153,21 @@ macro_rules! impl_frame_meta_for {
// Implement without specifying the drop behavior. // Implement without specifying the drop behavior.
($t:ty) => { ($t:ty) => {
use static_assertions::const_assert; use static_assertions::const_assert;
const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE); const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN); const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN);
// SAFETY: The size and alignment of the structure are checked. // SAFETY: The size and alignment of the structure are checked.
unsafe impl $crate::mm::frame::meta::FrameMeta for $t {} unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {}
}; };
} }
pub use impl_frame_meta_for; pub use impl_frame_meta_for;
 impl MetaSlot {
-    /// Increases the page reference count by one.
+    /// Increases the frame reference count by one.
     ///
     /// # Safety
     ///
-    /// The caller must have already held a reference to the page.
+    /// The caller must have already held a reference to the frame.
     pub(super) unsafe fn inc_ref_count(&self) {
         let last_ref_cnt = self.ref_count.fetch_add(1, Ordering::Relaxed);
         debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
@@ -182,8 +185,8 @@ impl MetaSlot {
 ///
 /// # Safety
 ///
-/// The caller should ensure that the pointer points to a page's metadata slot. The
-/// page should have a last handle to the page, and the page is about to be dropped,
+/// The caller should ensure that the pointer points to a frame's metadata slot. The
+/// frame should have a last handle to the frame, and the frame is about to be dropped,
 /// as the metadata slot after this operation becomes uninitialized.
 pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
@@ -193,14 +196,14 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // This should be guaranteed as a safety requirement.
     debug_assert_eq!(slot.ref_count.load(Ordering::Relaxed), 0);
 
-    let paddr = mapping::meta_to_page::<PagingConsts>(ptr as Vaddr);
+    let paddr = mapping::meta_to_frame::<PagingConsts>(ptr as Vaddr);
 
-    // SAFETY: We have exclusive access to the page metadata.
+    // SAFETY: We have exclusive access to the frame metadata.
     let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
-    // SAFETY: The page metadata is initialized and valid.
+    // SAFETY: The frame metadata is initialized and valid.
     let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };
 
-    let meta_ptr: *mut dyn FrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);
+    let meta_ptr: *mut dyn AnyFrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);
 
     // SAFETY: The implementer of the frame metadata decides that if the frame
     // is safe to be read or not.
@@ -209,11 +212,11 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
     // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
-    // access to the page metadata.
+    // access to the frame metadata.
     unsafe {
         // Invoke the custom `on_drop` handler.
         (*meta_ptr).on_drop(&mut reader);
-        // Drop the page metadata.
+        // Drop the frame metadata.
         core::ptr::drop_in_place(meta_ptr);
     }
@@ -221,24 +224,24 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
     // be reordered after this memory store.
     slot.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);
 
-    // Deallocate the page.
-    // It would return the page to the allocator for further use. This would be done
+    // Deallocate the frame.
+    // It would return the frame to the allocator for further use. This would be done
     // after the release of the metadata to avoid re-allocation before the metadata
     // is reset.
-    allocator::PAGE_ALLOCATOR
+    allocator::FRAME_ALLOCATOR
         .get()
         .unwrap()
         .lock()
         .dealloc(paddr / PAGE_SIZE, 1);
 }
 
-/// The metadata of pages that holds metadata of pages.
+/// The metadata of frames that holds metadata of frames.
 #[derive(Debug, Default)]
 pub struct MetaPageMeta {}
 
 impl_frame_meta_for!(MetaPageMeta);
 
-/// Initializes the metadata of all physical pages.
+/// Initializes the metadata of all physical frames.
 ///
 /// The function returns a list of `Frame`s containing the metadata.
 pub(crate) fn init() -> Segment<MetaPageMeta> {
@@ -248,7 +251,7 @@ pub(crate) fn init() -> Segment<MetaPageMeta> {
     };
     info!(
-        "Initializing page metadata for physical memory up to {:x}",
+        "Initializing frame metadata for physical memory up to {:x}",
         max_paddr
     );
@@ -256,14 +259,14 @@ pub(crate) fn init() -> Segment<MetaPageMeta> {
     super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
 
-    let num_pages = max_paddr / page_size::<PagingConsts>(1);
-    let (num_meta_pages, meta_pages) = alloc_meta_pages(num_pages);
-    // Map the metadata pages.
+    let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
+    let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);
+    // Map the metadata frames.
     boot_pt::with_borrow(|boot_pt| {
-        for i in 0..num_meta_pages {
+        for i in 0..nr_meta_pages {
             let frame_paddr = meta_pages + i * PAGE_SIZE;
-            let vaddr = mapping::page_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
+            let vaddr = mapping::frame_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
             let prop = PageProperty {
                 flags: PageFlags::RW,
                 cache: CachePolicy::Writeback,
@@ -275,41 +278,45 @@ pub(crate) fn init() -> Segment<MetaPageMeta> {
     })
     .unwrap();
 
-    // Now the metadata pages are mapped, we can initialize the metadata.
-    Segment::from_unused(meta_pages..meta_pages + num_meta_pages * PAGE_SIZE, |_| {
+    // Now the metadata frames are mapped, we can initialize the metadata.
+    Segment::from_unused(meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE, |_| {
         MetaPageMeta {}
     })
 }
 
-fn alloc_meta_pages(num_pages: usize) -> (usize, Paddr) {
-    let num_meta_pages = num_pages
+fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
+    let nr_meta_pages = tot_nr_frames
         .checked_mul(size_of::<MetaSlot>())
         .unwrap()
         .div_ceil(PAGE_SIZE);
-    let start_paddr = allocator::PAGE_ALLOCATOR
+    let start_paddr = allocator::FRAME_ALLOCATOR
         .get()
         .unwrap()
         .lock()
-        .alloc(num_meta_pages)
+        .alloc(nr_meta_pages)
         .unwrap()
         * PAGE_SIZE;
 
     let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
 
-    // Fill the metadata pages with a byte pattern of `REF_COUNT_UNUSED`.
+    // Fill the metadata frames with a byte pattern of `REF_COUNT_UNUSED`.
     debug_assert_eq!(REF_COUNT_UNUSED.to_ne_bytes(), [0xff, 0xff, 0xff, 0xff]);
-    // SAFETY: `slots` and the length is a valid region for the metadata pages
+    // SAFETY: `slots` and the length is a valid region for the metadata frames
     // that are going to be treated as metadata slots. The byte pattern is
     // valid as the initial value of the reference count (other fields are
     // either not accessed or `MaybeUninit`).
     unsafe {
-        core::ptr::write_bytes(slots as *mut u8, 0xff, num_pages * size_of::<MetaSlot>());
+        core::ptr::write_bytes(
+            slots as *mut u8,
+            0xff,
+            tot_nr_frames * size_of::<MetaSlot>(),
+        );
     }
 
-    (num_meta_pages, start_paddr)
+    (nr_meta_pages, start_paddr)
 }
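
A back-of-the-envelope check of the sizing logic in `alloc_meta_frames`, as a self-contained sketch (the 64-byte slot size is an illustrative assumption, not the real `size_of::<MetaSlot>()`):

const PAGE_SIZE: usize = 4096;
const META_SLOT_SIZE: usize = 64; // assumed, for illustration only

fn meta_overhead_demo() {
    let max_paddr = 4usize << 30; // 4 GiB of physical memory
    let tot_nr_frames = max_paddr / PAGE_SIZE; // 1,048,576 frames
    // Mirrors the checked_mul + div_ceil computation above.
    let nr_meta_pages = (tot_nr_frames * META_SLOT_SIZE).div_ceil(PAGE_SIZE);
    assert_eq!(nr_meta_pages, 16_384); // i.e., 64 MiB of metadata
}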
-/// Adds a temporary linear mapping for the metadata pages.
+/// Adds a temporary linear mapping for the metadata frames.
 ///
 /// We only assume boot page table to contain 4G linear mapping. Thus if the
 /// physical memory is huge we end up depleted of linear virtual memory for

View File

@@ -1,18 +1,35 @@
 // SPDX-License-Identifier: MPL-2.0
 
-//! Physical memory page management.
+//! Frame (physical memory page) management.
 //!
-//! A page is an aligned, contiguous range of bytes in physical memory. The sizes
-//! of base pages and huge pages are architecture-dependent. A page can be mapped
-//! to a virtual address using the page table.
+//! A frame is an aligned, contiguous range of bytes in physical memory. The
+//! sizes of base frames and huge frames (that are mapped as "huge pages") are
+//! architecture-dependent. A frame can be mapped to virtual address spaces
+//! using the page table.
 //!
-//! Pages can be accessed through page handles, namely, [`Frame`]. A page handle
-//! is a reference-counted handle to a page. When all handles to a page are dropped,
-//! the page is released and can be reused.
+//! Frames can be accessed through frame handles, namely, [`Frame`]. A frame
+//! handle is a reference-counted pointer to a frame. When all handles to a
+//! frame are dropped, the frame is released and can be reused. Contiguous
+//! frames are managed with [`Segment`].
 //!
-//! Pages can have dedicated metadata, which is implemented in the [`meta`] module.
-//! The reference count and usage of a page are stored in the metadata as well, leaving
-//! the handle only a pointer to the metadata.
+//! There are various kinds of frames. The top-level grouping of frame kinds
+//! are "typed" frames and "untyped" frames. Typed frames host Rust objects
+//! that must follow the visibility, lifetime and borrow rules of Rust, thus
+//! not being able to be directly manipulated. Untyped frames are raw memory
+//! that can be manipulated directly. So only untyped frames can be
+//! - safely shared to external entities such as device drivers or user-space
+//!   applications.
+//! - or directly manipulated with readers and writers that neglect Rust's
+//!   "alias XOR mutability" rule.
+//!
+//! The kind of a frame is determined by the type of its metadata. Untyped
+//! frames have a metadata type that implements the [`AnyUFrameMeta`]
+//! trait, while typed frames don't.
+//!
+//! Frames can have dedicated metadata, which is implemented in the [`meta`]
+//! module. The reference count and usage of a frame are stored in the metadata
+//! as well, leaving the handle only a pointer to the metadata slot. Users
+//! can create custom metadata types by implementing the [`AnyFrameMeta`] trait.
 
 pub mod allocator;
 pub mod meta;
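
The typed/untyped split described in the module documentation above surfaces directly in the API. A hedged sketch (the function is made up) of what only untyped frames allow:

use ostd::mm::{UFrame, VmIo};

// A `UFrame` is untyped, so its bytes can be accessed through `VmIo`.
// Typed frames expose no such accessors; their contents are reachable
// only via their (typed) metadata.
fn zero_first_byte(frame: &UFrame) {
    frame.write_val(0, &0u8).unwrap();
}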
@@ -25,37 +42,42 @@ use core::{
 };
 
 use meta::{
-    mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED,
+    mapping, AnyFrameMeta, MetaSlot, FRAME_METADATA_MAX_ALIGN, FRAME_METADATA_MAX_SIZE,
+    REF_COUNT_UNUSED,
 };
 pub use segment::Segment;
-use untyped::{DynUFrame, UFrameMeta};
+use untyped::{AnyUFrameMeta, UFrame};
 
 use super::{PagingLevel, PAGE_SIZE};
 use crate::mm::{Paddr, PagingConsts, Vaddr};
 
 static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
 
-/// A physical memory frame with a statically-known usage, whose metadata is represented by `M`.
+/// A smart pointer to a frame.
+///
+/// A frame is a contiguous range of bytes in physical memory. The [`Frame`]
+/// type is a smart pointer to a frame that is reference-counted.
+///
+/// Frames are associated with metadata. The type of the metadata `M`
+/// determines the kind of the frame. If `M` implements [`AnyUFrameMeta`], the
+/// frame is an untyped frame. Otherwise, it is a typed frame.
 #[derive(Debug)]
 #[repr(transparent)]
-pub struct Frame<M: FrameMeta + ?Sized> {
-    pub(super) ptr: *const MetaSlot,
-    pub(super) _marker: PhantomData<M>,
+pub struct Frame<M: AnyFrameMeta + ?Sized> {
+    // TODO: We may use a `NonNull<M>` here to make the frame a maybe-fat
+    // pointer and implement `CoerceUnsized` to avoid `From`s. However this is
+    // not quite feasible currently because we cannot cast a must-be-fat
+    // pointer (`*const dyn AnyFrameMeta`) to a maybe-fat pointer (`NonNull<M>`).
+    ptr: *const MetaSlot,
+    _marker: PhantomData<M>,
 }
 
-/// A physical memory frame with a dynamically-known usage.
-///
-/// The usage of this frame will not be changed while this object is alive. But the
-/// usage is not known at compile time. An [`DynFrame`] as a parameter accepts any
-/// type of frames.
-pub type DynFrame = Frame<dyn FrameMeta>;
+unsafe impl<M: AnyFrameMeta + ?Sized> Send for Frame<M> {}
 
-unsafe impl<M: FrameMeta + ?Sized> Send for Frame<M> {}
-unsafe impl<M: FrameMeta + ?Sized> Sync for Frame<M> {}
+unsafe impl<M: AnyFrameMeta + ?Sized> Sync for Frame<M> {}
 
-impl<M: FrameMeta> Frame<M> {
-    /// Get a `Frame` handle with a specific usage from a raw, unused page.
+impl<M: AnyFrameMeta> Frame<M> {
+    /// Gets a [`Frame`] with a specific usage from a raw, unused page.
     ///
     /// The caller should provide the initial metadata of the page.
     ///
@@ -68,11 +90,11 @@ impl<M: FrameMeta> Frame<M> {
         assert!(paddr % PAGE_SIZE == 0);
         assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
 
-        // Checking unsafe preconditions of the `FrameMeta` trait.
-        debug_assert!(size_of::<M>() <= PAGE_METADATA_MAX_SIZE);
-        debug_assert!(align_of::<M>() <= PAGE_METADATA_MAX_ALIGN);
+        // Checking unsafe preconditions of the `AnyFrameMeta` trait.
+        debug_assert!(size_of::<M>() <= FRAME_METADATA_MAX_SIZE);
+        debug_assert!(align_of::<M>() <= FRAME_METADATA_MAX_ALIGN);
 
-        let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
+        let vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;
 
         // SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
@@ -88,13 +110,13 @@ impl<M: FrameMeta> Frame<M> {
         // SAFETY: We have exclusive access to the page metadata. These fields are mutably
         // borrowed only once.
         let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
-        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta));
+        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn AnyFrameMeta));
 
         // SAFETY:
         // 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the
         //    metadata storage.
         // 2. The size and the alignment of the metadata storage is large enough to hold `M`
-        //    (guaranteed by the safety requirement of the `FrameMeta` trait).
+        //    (guaranteed by the safety requirement of the `AnyFrameMeta` trait).
         // 3. We have exclusive access to the metadata storage (guaranteed by the reference count).
         unsafe { ptr.cast::<M>().cast_mut().write(metadata) };
 
@@ -109,7 +131,7 @@ impl<M: FrameMeta> Frame<M> {
         }
     }
 
-    /// Get the metadata of this page.
+    /// Gets the metadata of this page.
     pub fn meta(&self) -> &M {
         // SAFETY: `self.ptr` points to the metadata storage which is valid to
         // be immutably borrowed as `M` because the type is correct, it lives
@@ -119,13 +141,13 @@ impl<M: FrameMeta> Frame<M> {
     }
 }
 
-impl<M: FrameMeta + ?Sized> Frame<M> {
-    /// Get the physical address.
+impl<M: AnyFrameMeta + ?Sized> Frame<M> {
+    /// Gets the physical address of the start of the frame.
     pub fn start_paddr(&self) -> Paddr {
-        mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
+        mapping::meta_to_frame::<PagingConsts>(self.ptr as Vaddr)
     }
 
-    /// Get the paging level of this page.
+    /// Gets the paging level of this page.
     ///
     /// This is the level of the page table entry that maps the frame,
     /// which determines the size of the frame.
@@ -136,15 +158,15 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
         1
     }
 
-    /// Size of this page in bytes.
+    /// Gets the size of this page in bytes.
     pub const fn size(&self) -> usize {
         PAGE_SIZE
     }
 
-    /// Get the dyncamically-typed metadata of this frame.
+    /// Gets the dynamically-typed metadata of this frame.
     ///
     /// If the type is known at compile time, use [`Frame::meta`] instead.
-    pub fn dyn_meta(&self) -> &dyn FrameMeta {
+    pub fn dyn_meta(&self) -> &dyn AnyFrameMeta {
         let slot = self.slot();
 
         // SAFETY: The page metadata is valid to be borrowed immutably, since it will never be
@@ -154,7 +176,7 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
         // SAFETY: The page metadata is initialized and valid.
         let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };
 
-        let meta_ptr: *const dyn FrameMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);
+        let meta_ptr: *const dyn AnyFrameMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);
 
         // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
         // borrowed under `vtable_ptr` because the vtable is correct, it lives under the given
@@ -162,11 +184,11 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
         unsafe { &*meta_ptr }
     }
 
-    /// Get the reference count of the page.
+    /// Gets the reference count of the frame.
     ///
-    /// It returns the number of all references to the page, including all the
-    /// existing page handles ([`Frame`], [`Frame<dyn FrameMeta>`]), and all the mappings in the
-    /// page table that points to the page.
+    /// It returns the number of all references to the frame, including all the
+    /// existing frame handles ([`Frame`], [`Frame<dyn AnyFrameMeta>`]), and all
+    /// the mappings in the page table that points to the frame.
     ///
     /// # Safety
     ///
@@ -181,13 +203,13 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
         unsafe { &(*self.ptr).ref_count }
     }
 
-    /// Forget the handle to the page.
+    /// Forgets the handle to the frame.
     ///
-    /// This will result in the page being leaked without calling the custom dropper.
+    /// This will result in the frame being leaked without calling the custom dropper.
     ///
-    /// A physical address to the page is returned in case the page needs to be
+    /// A physical address to the frame is returned in case the frame needs to be
     /// restored using [`Frame::from_raw`] later. This is useful when some architectural
-    /// data structures need to hold the page handle such as the page table.
+    /// data structures need to hold the frame handle such as the page table.
     #[allow(unused)]
     pub(in crate::mm) fn into_raw(self) -> Paddr {
         let paddr = self.start_paddr();
@@ -195,7 +217,7 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
         paddr
     }
 
-    /// Restore a forgotten `Frame` from a physical address.
+    /// Restores a forgotten `Frame` from a physical address.
     ///
     /// # Safety
     ///
@@ -205,10 +227,10 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
     /// And the restoring operation should only be done once for a forgotten
     /// `Frame`. Otherwise double-free will happen.
     ///
-    /// Also, the caller ensures that the usage of the page is correct. There's
+    /// Also, the caller ensures that the usage of the frame is correct. There's
     /// no checking of the usage in this function.
     pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
-        let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
+        let vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);
         let ptr = vaddr as *const MetaSlot;
 
         Self {
@@ -224,9 +246,9 @@ impl<M: FrameMeta + ?Sized> Frame<M> {
     }
 }
 
-impl<M: FrameMeta + ?Sized> Clone for Frame<M> {
+impl<M: AnyFrameMeta + ?Sized> Clone for Frame<M> {
     fn clone(&self) -> Self {
-        // SAFETY: We have already held a reference to the page.
+        // SAFETY: We have already held a reference to the frame.
         unsafe { self.slot().inc_ref_count() };
 
         Self {
@@ -236,7 +258,7 @@ impl<M: FrameMeta + ?Sized> Clone for Frame<M> {
     }
 }
 
-impl<M: FrameMeta + ?Sized> Drop for Frame<M> {
+impl<M: AnyFrameMeta + ?Sized> Drop for Frame<M> {
     fn drop(&mut self) {
         let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
         debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
@@ -254,84 +276,84 @@ impl<M: FrameMeta + ?Sized> Drop for Frame<M> {
     }
 }
 
-impl<M: FrameMeta> TryFrom<Frame<dyn FrameMeta>> for Frame<M> {
-    type Error = Frame<dyn FrameMeta>;
+impl<M: AnyFrameMeta> TryFrom<Frame<dyn AnyFrameMeta>> for Frame<M> {
+    type Error = Frame<dyn AnyFrameMeta>;
 
-    /// Try converting a [`Frame<dyn FrameMeta>`] into the statically-typed [`Frame`].
+    /// Tries converting a [`Frame<dyn AnyFrameMeta>`] into the statically-typed [`Frame`].
     ///
-    /// If the usage of the page is not the same as the expected usage, it will
-    /// return the dynamic page itself as is.
-    fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
+    /// If the usage of the frame is not the same as the expected usage, it will
+    /// return the dynamic frame itself as is.
+    fn try_from(dyn_frame: Frame<dyn AnyFrameMeta>) -> Result<Self, Self::Error> {
         if (dyn_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
             // SAFETY: The metadata is coerceable and the struct is transmutable.
-            Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, Frame<M>>(dyn_frame) })
+            Ok(unsafe { core::mem::transmute::<Frame<dyn AnyFrameMeta>, Frame<M>>(dyn_frame) })
         } else {
             Err(dyn_frame)
        }
    }
 }
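
The `TryFrom` implementation above is the way back from a dynamically-typed frame to a statically-typed one. A hedged usage sketch (`MyMeta` is a hypothetical `AnyFrameMeta` implementor):

fn downcast_or_keep(
    frame: Frame<dyn AnyFrameMeta>,
) -> Result<Frame<MyMeta>, Frame<dyn AnyFrameMeta>> {
    // Succeeds only if the frame was created with `MyMeta` metadata;
    // otherwise the original dynamic frame is handed back untouched.
    Frame::<MyMeta>::try_from(frame)
}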
-impl<M: FrameMeta> From<Frame<M>> for Frame<dyn FrameMeta> {
+impl<M: AnyFrameMeta> From<Frame<M>> for Frame<dyn AnyFrameMeta> {
     fn from(frame: Frame<M>) -> Self {
         // SAFETY: The metadata is coerceable and the struct is transmutable.
         unsafe { core::mem::transmute(frame) }
     }
 }
 
-impl<M: UFrameMeta> From<Frame<M>> for DynUFrame {
+impl<M: AnyUFrameMeta> From<Frame<M>> for UFrame {
     fn from(frame: Frame<M>) -> Self {
         // SAFETY: The metadata is coerceable and the struct is transmutable.
         unsafe { core::mem::transmute(frame) }
     }
 }
 
-impl<M: UFrameMeta> From<&Frame<M>> for &DynUFrame {
+impl<M: AnyUFrameMeta> From<&Frame<M>> for &UFrame {
     fn from(frame: &Frame<M>) -> Self {
         // SAFETY: The metadata is coerceable and the struct is transmutable.
         unsafe { core::mem::transmute(frame) }
     }
 }
 
-impl From<DynUFrame> for Frame<dyn FrameMeta> {
-    fn from(frame: DynUFrame) -> Self {
+impl From<UFrame> for Frame<dyn AnyFrameMeta> {
+    fn from(frame: UFrame) -> Self {
         // SAFETY: The metadata is coerceable and the struct is transmutable.
         unsafe { core::mem::transmute(frame) }
     }
 }
 
-impl TryFrom<Frame<dyn FrameMeta>> for DynUFrame {
-    type Error = Frame<dyn FrameMeta>;
+impl TryFrom<Frame<dyn AnyFrameMeta>> for UFrame {
+    type Error = Frame<dyn AnyFrameMeta>;
 
-    /// Try converting a [`Frame<dyn FrameMeta>`] into [`DynUFrame`].
+    /// Tries converting a [`Frame<dyn AnyFrameMeta>`] into [`UFrame`].
     ///
-    /// If the usage of the page is not the same as the expected usage, it will
-    /// return the dynamic page itself as is.
-    fn try_from(dyn_frame: Frame<dyn FrameMeta>) -> Result<Self, Self::Error> {
+    /// If the usage of the frame is not the same as the expected usage, it will
+    /// return the dynamic frame itself as is.
+    fn try_from(dyn_frame: Frame<dyn AnyFrameMeta>) -> Result<Self, Self::Error> {
         if dyn_frame.dyn_meta().is_untyped() {
             // SAFETY: The metadata is coerceable and the struct is transmutable.
-            Ok(unsafe { core::mem::transmute::<Frame<dyn FrameMeta>, DynUFrame>(dyn_frame) })
+            Ok(unsafe { core::mem::transmute::<Frame<dyn AnyFrameMeta>, UFrame>(dyn_frame) })
         } else {
             Err(dyn_frame)
         }
     }
 }
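
Taken together, the `From` and `TryFrom` implementations form a small conversion lattice. A hedged sketch of the erasing direction (`MyUMeta` is a hypothetical `AnyUFrameMeta` implementor):

fn erase_twice(frame: Frame<MyUMeta>) -> Frame<dyn AnyFrameMeta> {
    let uframe: UFrame = frame.into(); // forget the concrete metadata type
    uframe.into() // forget that the frame is untyped, too
}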
-/// Increases the reference count of the page by one.
+/// Increases the reference count of the frame by one.
 ///
 /// # Safety
 ///
 /// The caller should ensure the following conditions:
-/// 1. The physical address must represent a valid page;
-/// 2. The caller must have already held a reference to the page.
-pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) {
+/// 1. The physical address must represent a valid frame;
+/// 2. The caller must have already held a reference to the frame.
+pub(in crate::mm) unsafe fn inc_frame_ref_count(paddr: Paddr) {
     debug_assert!(paddr % PAGE_SIZE == 0);
     debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
 
-    let vaddr: Vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
+    let vaddr: Vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);
     // SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking
     // an immutable reference to it is always safe.
     let slot = unsafe { &*(vaddr as *const MetaSlot) };
 
-    // SAFETY: We have already held a reference to the page.
+    // SAFETY: We have already held a reference to the frame.
     unsafe { slot.inc_ref_count() };
 }

View File

@@ -1,66 +1,57 @@
 // SPDX-License-Identifier: MPL-2.0
 
-//! A contiguous range of pages.
+//! A contiguous range of frames.
 
 use core::{mem::ManuallyDrop, ops::Range};
 
-use super::{inc_page_ref_count, meta::FrameMeta, Frame};
-use crate::mm::{Paddr, UFrameMeta, PAGE_SIZE};
+use super::{inc_frame_ref_count, meta::AnyFrameMeta, Frame};
+use crate::mm::{AnyUFrameMeta, Paddr, PAGE_SIZE};
 
-/// A contiguous range of homogeneous physical memory pages.
+/// A contiguous range of homogeneous physical memory frames.
 ///
-/// This is a handle to many contiguous pages. It will be more lightweight
-/// than owning an array of page handles.
+/// This is a handle to multiple contiguous frames. It will be more lightweight
+/// than owning an array of frame handles.
 ///
-/// The ownership is achieved by the reference counting mechanism of pages.
-/// When constructing a `Segment`, the page handles are created then
-/// forgotten, leaving the reference count. When dropping a it, the page
+/// The ownership is achieved by the reference counting mechanism of frames.
+/// When constructing a [`Segment`], the frame handles are created then
+/// forgotten, leaving the reference count. When dropping it, the frame
 /// handles are restored and dropped, decrementing the reference count.
 ///
-/// All the metadata of the pages are homogeneous, i.e., they are of the same
+/// All the metadata of the frames are homogeneous, i.e., they are of the same
 /// type.
 #[derive(Debug)]
 #[repr(transparent)]
-pub struct Segment<M: FrameMeta + ?Sized> {
+pub struct Segment<M: AnyFrameMeta + ?Sized> {
     range: Range<Paddr>,
     _marker: core::marker::PhantomData<M>,
 }
 
-/// A contiguous range of homogeneous physical memory frames that have any metadata.
-///
-/// In other words, the metadata of the frames are of the same type but the type
-/// is not known at compile time. An [`DynSegment`] as a parameter accepts any
-/// type of segments.
-///
-/// The usage of this frame will not be changed while this object is alive.
-pub type DynSegment = Segment<dyn FrameMeta>;
-
-/// A contiguous range of homogeneous untyped physical memory pages that have any metadata.
+/// A contiguous range of homogeneous untyped physical memory frames that have any metadata.
 ///
 /// In other words, the metadata of the frames are of the same type, and they
 /// are untyped, but the type of metadata is not known at compile time. An
-/// [`DynUSegment`] as a parameter accepts any untyped segments.
+/// [`USegment`] as a parameter accepts any untyped segments.
 ///
 /// The usage of this frame will not be changed while this object is alive.
-pub type DynUSegment = Segment<dyn UFrameMeta>;
+pub type USegment = Segment<dyn AnyUFrameMeta>;
 
-impl<M: FrameMeta + ?Sized> Drop for Segment<M> {
+impl<M: AnyFrameMeta + ?Sized> Drop for Segment<M> {
     fn drop(&mut self) {
         for paddr in self.range.clone().step_by(PAGE_SIZE) {
-            // SAFETY: for each page there would be a forgotten handle
+            // SAFETY: for each frame there would be a forgotten handle
             // when creating the `Segment` object.
             drop(unsafe { Frame::<M>::from_raw(paddr) });
         }
     }
 }
 
-impl<M: FrameMeta + ?Sized> Clone for Segment<M> {
+impl<M: AnyFrameMeta + ?Sized> Clone for Segment<M> {
     fn clone(&self) -> Self {
         for paddr in self.range.clone().step_by(PAGE_SIZE) {
-            // SAFETY: for each page there would be a forgotten handle
+            // SAFETY: for each frame there would be a forgotten handle
             // when creating the `Segment` object, so we already have
-            // reference counts for the pages.
-            unsafe { inc_page_ref_count(paddr) };
+            // reference counts for the frames.
+            unsafe { inc_frame_ref_count(paddr) };
         }
         Self {
             range: self.range.clone(),
@@ -69,18 +60,18 @@ impl<M: FrameMeta + ?Sized> Clone for Segment<M> {
     }
 }
 
-impl<M: FrameMeta> Segment<M> {
-    /// Creates a new `Segment` from unused pages.
+impl<M: AnyFrameMeta> Segment<M> {
+    /// Creates a new [`Segment`] from unused frames.
     ///
-    /// The caller must provide a closure to initialize metadata for all the pages.
-    /// The closure receives the physical address of the page and returns the
+    /// The caller must provide a closure to initialize metadata for all the frames.
+    /// The closure receives the physical address of the frame and returns the
     /// metadata, which is similar to [`core::array::from_fn`].
     ///
     /// # Panics
     ///
     /// The function panics if:
     /// - the physical address is invalid or not aligned;
-    /// - any of the pages are already in use.
+    /// - any of the frames are already in use.
     pub fn from_unused<F>(range: Range<Paddr>, mut metadata_fn: F) -> Self
     where
         F: FnMut(Paddr) -> M,
@@ -95,26 +86,26 @@ impl<M: FrameMeta> Segment<M> {
     }
 }
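
A hedged sketch of the constructor documented above (`MyMeta` and its constructor are made up; real callers obtain unused ranges from the frame allocator):

fn four_frames(base: Paddr) -> Segment<MyMeta> {
    // One metadata value is produced per frame, like `core::array::from_fn`.
    Segment::from_unused(base..base + 4 * PAGE_SIZE, |paddr| MyMeta::new(paddr))
}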
-impl<M: FrameMeta + ?Sized> Segment<M> {
-    /// Gets the start physical address of the contiguous pages.
+impl<M: AnyFrameMeta + ?Sized> Segment<M> {
+    /// Gets the start physical address of the contiguous frames.
     pub fn start_paddr(&self) -> Paddr {
         self.range.start
     }
 
-    /// Gets the end physical address of the contiguous pages.
+    /// Gets the end physical address of the contiguous frames.
     pub fn end_paddr(&self) -> Paddr {
         self.range.end
     }
 
-    /// Gets the length in bytes of the contiguous pages.
+    /// Gets the length in bytes of the contiguous frames.
     pub fn size(&self) -> usize {
         self.range.end - self.range.start
     }
 
-    /// Splits the pages into two at the given byte offset from the start.
+    /// Splits the frames into two at the given byte offset from the start.
     ///
-    /// The resulting pages cannot be empty. So the offset cannot be neither
-    /// zero nor the length of the pages.
+    /// The resulting frames cannot be empty. So the offset can be neither
+    /// zero nor the length of the frames.
     ///
     /// # Panics
     ///
@@ -139,10 +130,10 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
         )
     }
 
-    /// Gets an extra handle to the pages in the byte offset range.
+    /// Gets an extra handle to the frames in the byte offset range.
     ///
     /// The sliced byte offset range in indexed by the offset from the start of
-    /// the contiguous pages. The resulting pages holds extra reference counts.
+    /// the contiguous frames. The resulting frames hold extra reference counts.
     ///
     /// # Panics
     ///
@@ -155,10 +146,10 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
         assert!(start <= end && end <= self.range.end);
         for paddr in (start..end).step_by(PAGE_SIZE) {
-            // SAFETY: We already have reference counts for the pages since
-            // for each page there would be a forgotten handle when creating
+            // SAFETY: We already have reference counts for the frames since
+            // for each frame there would be a forgotten handle when creating
             // the `Segment` object.
-            unsafe { inc_page_ref_count(paddr) };
+            unsafe { inc_frame_ref_count(paddr) };
         }
 
         Self {
@@ -168,10 +159,10 @@ impl<M: FrameMeta + ?Sized> Segment<M> {
     }
 }
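
The two operations above in use, assuming the elided signatures are `split(self, offset) -> (Self, Self)` and `slice(&self, &Range<usize>) -> Self` (a sketch; offsets illustrative):

fn carve(seg: Segment<MyMeta>) -> (Segment<MyMeta>, Segment<MyMeta>) {
    // `split` consumes the segment; no reference counts change.
    let (head, tail) = seg.split(PAGE_SIZE);
    // `slice` keeps the original and bumps the per-frame reference counts.
    let _first_of_tail = tail.slice(&(0..PAGE_SIZE));
    (head, tail)
}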
-impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
-    fn from(page: Frame<M>) -> Self {
-        let pa = page.start_paddr();
-        let _ = ManuallyDrop::new(page);
+impl<M: AnyFrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
+    fn from(frame: Frame<M>) -> Self {
+        let pa = frame.start_paddr();
+        let _ = ManuallyDrop::new(frame);
         Self {
             range: pa..pa + PAGE_SIZE,
             _marker: core::marker::PhantomData,
@@ -179,25 +170,25 @@ impl<M: FrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
     }
 }
 
-impl<M: FrameMeta + ?Sized> Iterator for Segment<M> {
+impl<M: AnyFrameMeta + ?Sized> Iterator for Segment<M> {
     type Item = Frame<M>;
 
     fn next(&mut self) -> Option<Self::Item> {
         if self.range.start < self.range.end {
-            // SAFETY: each page in the range would be a handle forgotten
+            // SAFETY: each frame in the range would be a handle forgotten
             // when creating the `Segment` object.
-            let page = unsafe { Frame::<M>::from_raw(self.range.start) };
+            let frame = unsafe { Frame::<M>::from_raw(self.range.start) };
             self.range.start += PAGE_SIZE;
             // The end cannot be non-page-aligned.
             debug_assert!(self.range.start <= self.range.end);
-            Some(page)
+            Some(frame)
         } else {
             None
         }
     }
 }
 
-impl<M: FrameMeta> From<Segment<M>> for DynSegment {
+impl<M: AnyFrameMeta> From<Segment<M>> for Segment<dyn AnyFrameMeta> {
     fn from(seg: Segment<M>) -> Self {
         let seg = ManuallyDrop::new(seg);
         Self {
@@ -207,13 +198,13 @@ impl<M: FrameMeta> From<Segment<M>> for DynSegment {
     }
 }
 
-impl<M: FrameMeta> TryFrom<DynSegment> for Segment<M> {
-    type Error = DynSegment;
+impl<M: AnyFrameMeta> TryFrom<Segment<dyn AnyFrameMeta>> for Segment<M> {
+    type Error = Segment<dyn AnyFrameMeta>;
 
-    fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
+    fn try_from(seg: Segment<dyn AnyFrameMeta>) -> core::result::Result<Self, Self::Error> {
         // SAFETY: for each page there would be a forgotten handle
         // when creating the `Segment` object.
-        let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
+        let first_frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(seg.range.start) };
         let first_frame = ManuallyDrop::new(first_frame);
         if !(first_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
             return Err(seg);
@@ -223,41 +214,41 @@ impl<M: FrameMeta> TryFrom<DynSegment> for Segment<M> {
         #[cfg(debug_assertions)]
         {
             for paddr in seg.range.clone().step_by(PAGE_SIZE) {
-                let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
+                let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
                 let frame = ManuallyDrop::new(frame);
                 debug_assert!((frame.dyn_meta() as &dyn core::any::Any).is::<M>());
             }
         }
         // SAFETY: The metadata is coerceable and the struct is transmutable.
-        Ok(unsafe { core::mem::transmute::<DynSegment, Segment<M>>(seg) })
+        Ok(unsafe { core::mem::transmute::<Segment<dyn AnyFrameMeta>, Segment<M>>(seg) })
     }
 }
 
-impl<M: UFrameMeta> From<Segment<M>> for DynUSegment {
+impl<M: AnyUFrameMeta> From<Segment<M>> for USegment {
     fn from(seg: Segment<M>) -> Self {
         // SAFETY: The metadata is coerceable and the struct is transmutable.
         unsafe { core::mem::transmute(seg) }
     }
 }
 
-impl<M: UFrameMeta> From<&Segment<M>> for &DynUSegment {
+impl<M: AnyUFrameMeta> From<&Segment<M>> for &USegment {
     fn from(seg: &Segment<M>) -> Self {
         // SAFETY: The metadata is coerceable and the struct is transmutable.
         unsafe { core::mem::transmute(seg) }
     }
 }
 
-impl TryFrom<DynSegment> for DynUSegment {
-    type Error = DynSegment;
+impl TryFrom<Segment<dyn AnyFrameMeta>> for USegment {
+    type Error = Segment<dyn AnyFrameMeta>;
 
-    /// Try converting a [`DynSegment`] into [`DynUSegment`].
+    /// Try converting a [`Segment<dyn AnyFrameMeta>`] into [`USegment`].
     ///
     /// If the usage of the page is not the same as the expected usage, it will
     /// return the dynamic page itself as is.
-    fn try_from(seg: DynSegment) -> core::result::Result<Self, Self::Error> {
+    fn try_from(seg: Segment<dyn AnyFrameMeta>) -> core::result::Result<Self, Self::Error> {
         // SAFETY: for each page there would be a forgotten handle
         // when creating the `Segment` object.
-        let first_frame = unsafe { Frame::<dyn FrameMeta>::from_raw(seg.range.start) };
+        let first_frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(seg.range.start) };
         let first_frame = ManuallyDrop::new(first_frame);
         if !first_frame.dyn_meta().is_untyped() {
             return Err(seg);
@@ -267,12 +258,12 @@ impl TryFrom<DynSegment> for DynUSegment {
         #[cfg(debug_assertions)]
         {
             for paddr in seg.range.clone().step_by(PAGE_SIZE) {
-                let frame = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) };
+                let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
                 let frame = ManuallyDrop::new(frame);
                 debug_assert!(frame.dyn_meta().is_untyped());
             }
         }
         // SAFETY: The metadata is coerceable and the struct is transmutable.
-        Ok(unsafe { core::mem::transmute::<DynSegment, DynUSegment>(seg) })
+        Ok(unsafe { core::mem::transmute::<Segment<dyn AnyFrameMeta>, USegment>(seg) })
     }
 }
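
A hedged sketch chaining the segment conversions above (`MyUMeta` is again a hypothetical untyped metadata type):

fn erase_and_recover(seg: Segment<MyUMeta>) {
    let any: Segment<dyn AnyFrameMeta> = seg.into();
    // Succeeds because `MyUMeta` frames are untyped; the first frame's
    // metadata is inspected (and, in debug builds, every frame's).
    let _useg: USegment = any.try_into().unwrap();
}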

View File

@@ -2,13 +2,12 @@
 
 //! Untyped physical memory management.
 //!
-//! A frame is a special page that is _untyped_ memory.
-//! It is used to store data irrelevant to the integrity of the kernel.
-//! All pages mapped to the virtual address space of the users are backed by
-//! frames. Frames, with all the properties of pages, can additionally be safely
-//! read and written by the kernel or the user.
+//! As detailed in [`crate::mm::frame`], untyped memory can be accessed with
+//! relaxed rules but we cannot create references to them. This module provides
+//! the declaration of untyped frames and segments, and the implementation of
+//! extra functionalities (such as [`VmIo`]) for them.
 
-use super::{meta::FrameMeta, Frame, Segment};
+use super::{meta::AnyFrameMeta, Frame, Segment};
 use crate::{
     mm::{
         io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
@@ -19,24 +18,25 @@ use crate::{
 
 /// The metadata of untyped frame.
 ///
-/// If a structure `M` implements [`UFrameMeta`], it can be used as the
+/// If a structure `M` implements [`AnyUFrameMeta`], it can be used as the
 /// metadata of a type of untyped frames [`Frame<M>`]. All frames of such type
 /// will be accessible as untyped memory.
-pub trait UFrameMeta: FrameMeta {}
+pub trait AnyUFrameMeta: AnyFrameMeta {}
 
-/// An untyped frame with any metadata.
+/// A smart pointer to an untyped frame with any metadata.
+///
+/// The metadata of the frame is not known at compile time but the frame must
+/// be an untyped one. An [`UFrame`] as a parameter accepts any type of
+/// untyped frame metadata.
 ///
 /// The usage of this frame will not be changed while this object is alive.
-/// The metadata of the frame is not known at compile time but the frame must
-/// be an untyped one. An [`DynUFrame`] as a parameter accepts any type of
-/// untyped frame metadata.
-pub type DynUFrame = Frame<dyn UFrameMeta>;
+pub type UFrame = Frame<dyn AnyUFrameMeta>;
 
 /// Makes a structure usable as untyped frame metadata.
 ///
-/// Directly implementing [`FrameMeta`] is not safe since the size and
+/// Directly implementing [`AnyFrameMeta`] is not safe since the size and
 /// alignment must be checked. This macro provides a safe way to implement both
-/// [`FrameMeta`] and [`UFrameMeta`] with compile-time checks.
+/// [`AnyFrameMeta`] and [`AnyUFrameMeta`] with compile-time checks.
 ///
 /// If this macro is used for built-in typed frame metadata, it won't compile.
 #[macro_export]
@@ -44,25 +44,25 @@ macro_rules! impl_untyped_frame_meta_for {
     // Implement without specifying the drop behavior.
     ($t:ty) => {
         use static_assertions::const_assert;
-        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
-        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN);
         // SAFETY: The size and alignment of the structure are checked.
-        unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
+        unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {
             fn is_untyped(&self) -> bool {
                 true
             }
         }
-        impl $crate::mm::frame::untyped::UFrameMeta for $t {}
+        impl $crate::mm::frame::untyped::AnyUFrameMeta for $t {}
     };
     // Implement with a customized drop function.
     ($t:ty, $body:expr) => {
         use static_assertions::const_assert;
-        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
-        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
+        const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
+        const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN);
         // SAFETY: The size and alignment of the structure are checked.
         // Outside OSTD the user cannot implement a `on_drop` method for typed
         // frames. And untyped frames can be safely read.
-        unsafe impl $crate::mm::frame::meta::FrameMeta for $t {
+        unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {
             fn on_drop(&mut self, reader: &mut $crate::mm::VmReader<$crate::mm::Infallible>) {
                 $body
             }
@@ -71,7 +71,7 @@ macro_rules! impl_untyped_frame_meta_for {
                 true
             }
         }
-        impl $crate::mm::frame::untyped::UFrameMeta for $t {}
+        impl $crate::mm::frame::untyped::AnyUFrameMeta for $t {}
     };
 }
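
A hedged sketch of the macro's plain form (`IoBufMeta` is hypothetical); the second arm additionally accepts an expression to run in `on_drop`, with a `VmReader` over the frame's untyped contents:

#[derive(Debug, Default)]
struct IoBufMeta;

// Expands to the size/alignment `const_assert!`s, an `AnyFrameMeta` impl
// whose `is_untyped()` returns true, and an `AnyUFrameMeta` impl.
impl_untyped_frame_meta_for!(IoBufMeta);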
@@ -91,7 +91,7 @@ pub trait UntypedMem {
 macro_rules! impl_untyped_for {
     ($t:ident) => {
-        impl<UM: UFrameMeta + ?Sized> UntypedMem for $t<UM> {
+        impl<UM: AnyUFrameMeta + ?Sized> UntypedMem for $t<UM> {
             fn reader(&self) -> VmReader<'_, Infallible> {
                 let ptr = paddr_to_vaddr(self.start_paddr()) as *const u8;
                 // SAFETY: Only untyped frames are allowed to be read.
@@ -105,7 +105,7 @@ macro_rules! impl_untyped_for {
             }
         }
 
-        impl<UM: UFrameMeta + ?Sized> VmIo for $t<UM> {
+        impl<UM: AnyUFrameMeta + ?Sized> VmIo for $t<UM> {
             fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
                 let read_len = writer.avail().min(self.size().saturating_sub(offset));
                 // Do bound check with potential integer overflow in mind
@@ -151,12 +151,12 @@ use core::{marker::PhantomData, mem::ManuallyDrop, ops::Deref};
 
 /// `FrameRef` is a struct that can work as `&'a Frame<M>`.
 ///
 /// This is solely useful for [`crate::collections::xarray`].
-pub struct FrameRef<'a, M: UFrameMeta + ?Sized> {
+pub struct FrameRef<'a, M: AnyUFrameMeta + ?Sized> {
     inner: ManuallyDrop<Frame<M>>,
     _marker: PhantomData<&'a Frame<M>>,
 }
 
-impl<M: UFrameMeta + ?Sized> Deref for FrameRef<'_, M> {
+impl<M: AnyUFrameMeta + ?Sized> Deref for FrameRef<'_, M> {
     type Target = Frame<M>;
 
     fn deref(&self) -> &Self::Target {
@@ -166,7 +166,7 @@ impl<M: UFrameMeta + ?Sized> Deref for FrameRef<'_, M> {
 
 // SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
 // The pointer is also aligned to 4.
-unsafe impl<M: UFrameMeta + ?Sized> xarray::ItemEntry for Frame<M> {
+unsafe impl<M: AnyUFrameMeta + ?Sized> xarray::ItemEntry for Frame<M> {
     type Ref<'a>
         = FrameRef<'a, M>
     where

View File

@@ -11,7 +11,7 @@ use spin::Once;
 use super::paddr_to_vaddr;
 use crate::{
-    mm::{frame::allocator::PAGE_ALLOCATOR, PAGE_SIZE},
+    mm::{frame::allocator::FRAME_ALLOCATOR, PAGE_SIZE},
     prelude::*,
     sync::SpinLock,
     trap::disable_local,
@@ -94,7 +94,7 @@ impl LockedHeapWithRescue {
         };
 
         let allocation_start = {
-            let mut page_allocator = PAGE_ALLOCATOR.get().unwrap().lock();
+            let mut page_allocator = FRAME_ALLOCATOR.get().unwrap().lock();
             if num_frames >= MIN_NUM_FRAMES {
                 page_allocator.alloc(num_frames).ok_or(Error::NoMemory)?
             } else {

View File

@@ -7,11 +7,11 @@
 //! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
 //! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
 //! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
-//! (e.g., `&[u8]`) or untyped memory (e.g, [`DynUFrame`]). Behind the scene, `VmReader` and `VmWriter`
+//! (e.g., `&[u8]`) or untyped memory (e.g, [`UFrame`]). Behind the scene, `VmReader` and `VmWriter`
 //! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
 //! safety depends on whether the given memory regions are _valid_ or not.
 //!
-//! [`DynUFrame`]: crate::mm::DynUFrame
+//! [`UFrame`]: crate::mm::UFrame
 //! [`from_user_space`]: `VmReader::from_user_space`
 //! [`from_kernel_space`]: `VmReader::from_kernel_space`
 //!
@@ -58,7 +58,7 @@ use crate::{
 };
 
 /// A trait that enables reading/writing data from/to a VM object,
-/// e.g., [`DynUSegment`], [`Vec<DynUFrame>`] and [`DynUFrame`].
+/// e.g., [`USegment`], [`Vec<UFrame>`] and [`UFrame`].
 ///
 /// # Concurrency
 ///
@@ -67,8 +67,8 @@ use crate::{
 /// desire predictability or atomicity, the users should add extra mechanism
 /// for such properties.
 ///
-/// [`DynUSegment`]: crate::mm::DynUSegment
-/// [`DynUFrame`]: crate::mm::DynUFrame
+/// [`USegment`]: crate::mm::USegment
+/// [`UFrame`]: crate::mm::UFrame
 pub trait VmIo: Send + Sync {
     /// Reads requested data at a specified offset into a given `VmWriter`.
     ///
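
A hedged round-trip sketch with the trait above; it works for any `VmIo` implementor, for example a `UFrame` (the value written is arbitrary):

fn roundtrip<T: VmIo>(obj: &T) -> u64 {
    // `write_val`/`read_val` are `VmIo`'s POD-typed convenience methods.
    obj.write_val(0, &0xdead_beef_u64).unwrap();
    obj.read_val::<u64>(0).unwrap()
}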

View File

@@ -11,7 +11,7 @@ use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE};
 use crate::{
     cpu::CpuSet,
     mm::{
-        frame::{meta::FrameMeta, Frame},
+        frame::{meta::AnyFrameMeta, Frame},
         page_prop::PageProperty,
         page_table::PageTableItem,
         tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
@@ -204,7 +204,7 @@ impl<M: AllocatorSelector + 'static> KVirtArea<M> {
 
 impl KVirtArea<Tracked> {
     /// Maps pages into the kernel virtual area.
-    pub fn map_pages<T: FrameMeta>(
+    pub fn map_pages<T: AnyFrameMeta>(
         &mut self,
         range: Range<Vaddr>,
         pages: impl Iterator<Item = Frame<T>>,
@@ -232,7 +232,7 @@ impl KVirtArea<Tracked> {
     ///
     /// This function returns None if the address is not mapped (`NotMapped`),
     /// while panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page.
-    pub fn get_page(&self, addr: Vaddr) -> Option<Frame<dyn FrameMeta>> {
+    pub fn get_page(&self, addr: Vaddr) -> Option<Frame<dyn AnyFrameMeta>> {
         let query_result = self.query_page(addr);
         match query_result {
             PageTableItem::Mapped {

View File

@@ -163,7 +163,7 @@ pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
     // Map the metadata pages.
     {
-        let start_va = mapping::page_to_meta::<PagingConsts>(0);
+        let start_va = mapping::frame_to_meta::<PagingConsts>(0);
         let from = start_va..start_va + meta_pages.size();
         let prop = PageProperty {
             flags: PageFlags::RW,
View File

@@ -26,9 +26,9 @@ pub use self::{
     dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
     frame::{
         allocator::FrameAllocOptions,
-        segment::{DynSegment, DynUSegment, Segment},
-        untyped::{DynUFrame, UFrameMeta, UntypedMem},
-        DynFrame, Frame,
+        segment::{Segment, USegment},
+        untyped::{AnyUFrameMeta, UFrame, UntypedMem},
+        Frame,
     },
     io::{
         Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader,

View File

@@ -16,8 +16,8 @@ use crate::{
     cpu::num_cpus,
     cpu_local_cell,
     mm::{
-        frame::allocator::PAGE_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageProperty,
-        PagingConstsTrait, Vaddr, PAGE_SIZE,
+        frame::allocator::FRAME_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr,
+        PageProperty, PagingConstsTrait, Vaddr, PAGE_SIZE,
     },
     sync::SpinLock,
 };
@@ -221,7 +221,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
     }
 
     fn alloc_frame(&mut self) -> FrameNumber {
-        let frame = PAGE_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
+        let frame = FRAME_ALLOCATOR.get().unwrap().lock().alloc(1).unwrap();
         self.frames.push(frame);
         // Zero it out.
         let vaddr = paddr_to_vaddr(frame * PAGE_SIZE) as *mut u8;
@@ -233,7 +233,7 @@ impl<E: PageTableEntryTrait, C: PagingConstsTrait> BootPageTable<E, C> {
 impl<E: PageTableEntryTrait, C: PagingConstsTrait> Drop for BootPageTable<E, C> {
     fn drop(&mut self) {
         for frame in &self.frames {
-            PAGE_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
+            FRAME_ALLOCATOR.get().unwrap().lock().dealloc(*frame, 1);
         }
     }
 }

View File

@ -76,7 +76,7 @@ use super::{
}; };
use crate::{ use crate::{
mm::{ mm::{
frame::{meta::FrameMeta, Frame}, frame::{meta::AnyFrameMeta, Frame},
kspace::should_map_as_tracked, kspace::should_map_as_tracked,
paddr_to_vaddr, Paddr, PageProperty, Vaddr, paddr_to_vaddr, Paddr, PageProperty, Vaddr,
}, },
@ -91,7 +91,7 @@ pub enum PageTableItem {
}, },
Mapped { Mapped {
va: Vaddr, va: Vaddr,
page: Frame<dyn FrameMeta>, page: Frame<dyn AnyFrameMeta>,
prop: PageProperty, prop: PageProperty,
}, },
#[allow(dead_code)] #[allow(dead_code)]
@ -402,9 +402,9 @@ where
self.0.query() self.0.query()
} }
/// Maps the range starting from the current address to a [`Frame<dyn FrameMeta>`]. /// Maps the range starting from the current address to a [`Frame<dyn AnyFrameMeta>`].
/// ///
/// It returns the previously mapped [`Frame<dyn FrameMeta>`] if one exists. /// It returns the previously mapped [`Frame<dyn AnyFrameMeta>`] if one exists.
/// ///
/// # Panics /// # Panics
/// ///
@ -419,9 +419,9 @@ where
/// not affect the kernel's memory safety. /// not affect the kernel's memory safety.
pub unsafe fn map( pub unsafe fn map(
&mut self, &mut self,
page: Frame<dyn FrameMeta>, page: Frame<dyn AnyFrameMeta>,
prop: PageProperty, prop: PageProperty,
) -> Option<Frame<dyn FrameMeta>> { ) -> Option<Frame<dyn AnyFrameMeta>> {
let end = self.0.va + page.size(); let end = self.0.va + page.size();
assert!(end <= self.0.barrier_va.end); assert!(end <= self.0.barrier_va.end);

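A hedged sketch of the call-site contract described by the doc comment (context assumed: `cursor` is a page-table `CursorMut` positioned at a tracked, in-range address; `frame` is a `Frame<dyn AnyFrameMeta>` and `prop` a `PageProperty`):

```rust
// SAFETY: the frame is untyped (or otherwise safe-to-map) memory, so
// mapping it at this virtual address cannot break kernel memory safety.
let old = unsafe { cursor.map(frame, prop) };
if let Some(replaced) = old {
    // The previously mapped frame is handed back to the caller; keep it
    // alive until any stale TLB entries are flushed (see the `tlb` hunk
    // below) rather than dropping it on the spot.
    keep_until_tlb_flush(replaced); // hypothetical helper
}
```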
View File

@ -8,7 +8,7 @@ use super::{MapTrackingStatus, PageTableEntryTrait, RawPageTableNode};
use crate::{ use crate::{
arch::mm::{PageTableEntry, PagingConsts}, arch::mm::{PageTableEntry, PagingConsts},
mm::{ mm::{
frame::{inc_page_ref_count, meta::FrameMeta, Frame}, frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
page_prop::PageProperty, page_prop::PageProperty,
Paddr, PagingConstsTrait, PagingLevel, Paddr, PagingConstsTrait, PagingLevel,
}, },
@ -27,7 +27,7 @@ pub(in crate::mm) enum Child<
[(); C::NR_LEVELS as usize]:, [(); C::NR_LEVELS as usize]:,
{ {
PageTable(RawPageTableNode<E, C>), PageTable(RawPageTableNode<E, C>),
Frame(Frame<dyn FrameMeta>, PageProperty), Frame(Frame<dyn AnyFrameMeta>, PageProperty),
/// Pages not tracked by handles. /// Pages not tracked by handles.
Untracked(Paddr, PagingLevel, PageProperty), Untracked(Paddr, PagingLevel, PageProperty),
None, None,
@ -119,7 +119,7 @@ where
match is_tracked { match is_tracked {
MapTrackingStatus::Tracked => { MapTrackingStatus::Tracked => {
// SAFETY: The physical address points to a valid page. // SAFETY: The physical address points to a valid page.
let page = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) }; let page = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
Child::Frame(page, pte.prop()) Child::Frame(page, pte.prop())
} }
MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()), MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
@ -150,7 +150,7 @@ where
if !pte.is_last(level) { if !pte.is_last(level) {
// SAFETY: The physical address is valid and the PTE already owns // SAFETY: The physical address is valid and the PTE already owns
// the reference to the page. // the reference to the page.
unsafe { inc_page_ref_count(paddr) }; unsafe { inc_frame_ref_count(paddr) };
// SAFETY: The physical address points to a valid page table node // SAFETY: The physical address points to a valid page table node
// at the given level. // at the given level.
return Child::PageTable(unsafe { RawPageTableNode::from_raw_parts(paddr, level - 1) }); return Child::PageTable(unsafe { RawPageTableNode::from_raw_parts(paddr, level - 1) });
@ -160,9 +160,9 @@ where
MapTrackingStatus::Tracked => { MapTrackingStatus::Tracked => {
// SAFETY: The physical address is valid and the PTE already owns // SAFETY: The physical address is valid and the PTE already owns
// the reference to the page. // the reference to the page.
unsafe { inc_page_ref_count(paddr) }; unsafe { inc_frame_ref_count(paddr) };
// SAFETY: The physical address points to a valid page. // SAFETY: The physical address points to a valid page.
let page = unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) }; let page = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
Child::Frame(page, pte.prop()) Child::Frame(page, pte.prop())
} }
MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()), MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),

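The recurring invariant in this hunk, in one place: a PTE owns exactly one reference to its child frame, so materializing an additional owning handle from the raw physical address requires bumping the reference count first, because `from_raw` assumes ownership of one reference. A sketch, with `paddr` and `pte` assumed in scope:

```rust
// SAFETY (sketch): `paddr` points to a valid tracked frame that the PTE
// already references; the increment pays for the new handle below, so the
// PTE's own reference stays intact.
let child = unsafe {
    inc_frame_ref_count(paddr);
    Child::Frame(Frame::<dyn AnyFrameMeta>::from_raw(paddr), pte.prop())
};
```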
View File

@ -40,7 +40,7 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait};
use crate::{ use crate::{
arch::mm::{PageTableEntry, PagingConsts}, arch::mm::{PageTableEntry, PagingConsts},
mm::{ mm::{
frame::{inc_page_ref_count, meta::FrameMeta, Frame}, frame::{inc_frame_ref_count, meta::AnyFrameMeta, Frame},
paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel, paddr_to_vaddr, FrameAllocOptions, Infallible, Paddr, PagingConstsTrait, PagingLevel,
VmReader, VmReader,
}, },
@ -166,7 +166,7 @@ where
// SAFETY: We have a reference count to the page and can safely increase the reference // SAFETY: We have a reference count to the page and can safely increase the reference
// count by one more. // count by one more.
unsafe { unsafe {
inc_page_ref_count(self.paddr()); inc_frame_ref_count(self.paddr());
} }
} }
@ -405,7 +405,7 @@ where
// SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for // SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for
// all possible generic parameters. And the layout fits the requirements. // all possible generic parameters. And the layout fits the requirements.
unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> FrameMeta for PageTablePageMeta<E, C> unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> AnyFrameMeta for PageTablePageMeta<E, C>
where where
[(); C::NR_LEVELS as usize]:, [(); C::NR_LEVELS as usize]:,
{ {
@ -434,7 +434,7 @@ where
} else if is_tracked == MapTrackingStatus::Tracked { } else if is_tracked == MapTrackingStatus::Tracked {
// SAFETY: The PTE points to a tracked page. The ownership // SAFETY: The PTE points to a tracked page. The ownership
// of the child is transferred to the child then dropped. // of the child is transferred to the child then dropped.
drop(unsafe { Frame::<dyn FrameMeta>::from_raw(paddr) }); drop(unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) });
} }
} }
} }

View File

@ -2,7 +2,7 @@
//! APIs for memory statistics. //! APIs for memory statistics.
use crate::mm::frame::allocator::PAGE_ALLOCATOR; use crate::mm::frame::allocator::FRAME_ALLOCATOR;
/// Total memory available for any usages in the system (in bytes). /// Total memory available for any usages in the system (in bytes).
/// ///
@ -10,12 +10,12 @@ use crate::mm::frame::allocator::PAGE_ALLOCATOR;
/// in most occasions. For example, bad memory, kernel statically-allocated /// in most occasions. For example, bad memory, kernel statically-allocated
/// memory or firmware reserved memories do not count. /// memory or firmware reserved memories do not count.
pub fn mem_total() -> usize { pub fn mem_total() -> usize {
PAGE_ALLOCATOR.get().unwrap().lock().mem_total() FRAME_ALLOCATOR.get().unwrap().lock().mem_total()
} }
/// Current readily available memory (in bytes). /// Current readily available memory (in bytes).
/// ///
/// Such memory can be directly used for allocation without reclaiming. /// Such memory can be directly used for allocation without reclaiming.
pub fn mem_available() -> usize { pub fn mem_available() -> usize {
PAGE_ALLOCATOR.get().unwrap().lock().mem_available() FRAME_ALLOCATOR.get().unwrap().lock().mem_available()
} }

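A hedged usage sketch of the two statistics getters (the `ostd::mm::stat` module path and the `log` macro are assumptions for illustration):

```rust
use ostd::mm::stat::{mem_available, mem_total};

fn log_memory_pressure() {
    let total = mem_total();
    let avail = mem_available();
    // Derived figure for illustration; not part of the API.
    let used = total - avail;
    log::info!("memory: {used} of {total} bytes in use, {avail} readily available");
}
```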
View File

@ -6,7 +6,7 @@ use alloc::vec::Vec;
use core::ops::Range; use core::ops::Range;
use super::{ use super::{
frame::{meta::FrameMeta, Frame}, frame::{meta::AnyFrameMeta, Frame},
Vaddr, PAGE_SIZE, Vaddr, PAGE_SIZE,
}; };
use crate::{ use crate::{
@ -80,7 +80,7 @@ impl<G: PinCurrentCpu> TlbFlusher<G> {
/// flushed. Otherwise, if the page is recycled for other purposes, the user /// flushed. Otherwise, if the page is recycled for other purposes, the user
/// space program can still access the page through the TLB entries. This /// space program can still access the page through the TLB entries. This
/// method is designed to be used in such cases. /// method is designed to be used in such cases.
pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: Frame<dyn FrameMeta>) { pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: Frame<dyn AnyFrameMeta>) {
self.issue_tlb_flush_(op, Some(drop_after_flush)); self.issue_tlb_flush_(op, Some(drop_after_flush));
} }
@ -94,7 +94,7 @@ impl<G: PinCurrentCpu> TlbFlusher<G> {
self.need_self_flush self.need_self_flush
} }
fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option<Frame<dyn FrameMeta>>) { fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option<Frame<dyn AnyFrameMeta>>) {
let op = op.optimize_for_large_range(); let op = op.optimize_for_large_range();
// Fast path for single CPU cases. // Fast path for single CPU cases.
@ -159,7 +159,7 @@ impl TlbFlushOp {
// Lock ordering: lock FLUSH_OPS before PAGE_KEEPER. // Lock ordering: lock FLUSH_OPS before PAGE_KEEPER.
cpu_local! { cpu_local! {
static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new()); static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
static PAGE_KEEPER: SpinLock<Vec<Frame<dyn FrameMeta>>, LocalIrqDisabled> = SpinLock::new(Vec::new()); static PAGE_KEEPER: SpinLock<Vec<Frame<dyn AnyFrameMeta>>, LocalIrqDisabled> = SpinLock::new(Vec::new());
} }
fn do_remote_flush() { fn do_remote_flush() {

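Putting `issue_tlb_flush_with` in context, the unmap-then-flush discipline the doc comment describes might read as follows (sketch; `flusher`, `va`, and `old_frame` are assumed to be in scope, and `dispatch_tlb_flush` is assumed to name this module's dispatch step):

```rust
// After unmapping `va`, do not drop the old frame yet: a remote CPU may
// still reach it through a stale TLB entry.
flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), old_frame);
// The frame parked above is released only after the flush has been carried
// out on all relevant CPUs, so recycled memory stays unreachable.
flusher.dispatch_tlb_flush();
```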
View File

@ -22,7 +22,7 @@ use crate::{
kspace::KERNEL_PAGE_TABLE, kspace::KERNEL_PAGE_TABLE,
page_table::{self, PageTable, PageTableItem, UserMode}, page_table::{self, PageTable, PageTableItem, UserMode},
tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD}, tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
DynUFrame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR, PageProperty, UFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
}, },
prelude::*, prelude::*,
sync::{PreemptDisabled, RwLock, RwLockReadGuard}, sync::{PreemptDisabled, RwLock, RwLockReadGuard},
@ -40,7 +40,7 @@ use crate::{
/// ///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To /// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical /// provide memory pages for a `VmSpace`, one can allocate and map physical
/// memory ([`DynUFrame`]s) to the `VmSpace` using the cursor. /// memory ([`UFrame`]s) to the `VmSpace` using the cursor.
/// ///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to /// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space. /// handle page faults generated from user space.
@ -323,7 +323,7 @@ impl CursorMut<'_, '_> {
/// Maps a frame into the current slot. /// Maps a frame into the current slot.
/// ///
/// This method will bring the cursor to the next slot after the modification. /// This method will bring the cursor to the next slot after the modification.
pub fn map(&mut self, frame: DynUFrame, prop: PageProperty) { pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
let start_va = self.virt_addr(); let start_va = self.virt_addr();
// SAFETY: It is safe to map untyped memory into the userspace. // SAFETY: It is safe to map untyped memory into the userspace.
let old = unsafe { self.pt_cursor.map(frame.into(), prop) }; let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
@ -475,7 +475,7 @@ pub enum VmItem {
/// The virtual address of the slot. /// The virtual address of the slot.
va: Vaddr, va: Vaddr,
/// The mapped frame. /// The mapped frame.
frame: DynUFrame, frame: UFrame,
/// The property of the slot. /// The property of the slot.
prop: PageProperty, prop: PageProperty,
}, },
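End to end, the cursor-based mapping flow described above reads roughly like this (sketch; error handling is elided and the `PageProperty::new` arguments are placeholder policy choices):

```rust
use ostd::{
    mm::{CachePolicy, PageFlags, PageProperty, UFrame, Vaddr, VmSpace, PAGE_SIZE},
    prelude::*,
};

fn map_one_frame(vm_space: &VmSpace, va: Vaddr, frame: UFrame) -> Result<()> {
    // A cursor grants exclusive access to the `[va, va + PAGE_SIZE)` slot.
    let mut cursor = vm_space.cursor_mut(&(va..va + PAGE_SIZE))?;
    // Maps the frame and advances the cursor to the next slot.
    cursor.map(frame, PageProperty::new(PageFlags::RW, CachePolicy::Writeback));
    Ok(())
}
```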