Renaming concepts around pages and frames

This commit renames `Frame` -> `UntypedFrame` and `Page` -> `Frame`.
The following related concepts are renamed in the same way:
 - `Segment` -> `UntypedSegment`,
 - `ContPages` -> `Segment`,
 - `DynPage` -> `AnyFrame`,
 - `PageMeta` -> `FrameMeta`,
 - `FrameMeta` -> `UntypedMeta`.

This commit also reorganizes the source in the `mm/page` and `mm/frame`
modules to accommodate the changes.
Authored by Zhang Junyang, 2024-12-17 15:04:38 +08:00
Committed by Tate, Hongliang Tian
Parent: 10f1856306
Commit: c9a37ccab1
55 changed files with 1154 additions and 1160 deletions
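
For orientation before the diff, here is a minimal sketch of how the renamed public types read at a call site. It is illustrative only: the helper function is invented, and only the type and method names that also appear in the hunks below are taken from the `ostd::mm` API. The typed `Frame<M>`/`Segment<M>` (formerly `Page<M>`/`ContPages<M>`) show up in the `ostd`-internal hunks near the end of the diff.

```rust
use ostd::mm::{FrameAllocOptions, UntypedFrame, UntypedSegment, VmIo};

// Hypothetical helper; only the type and method names come from the renamed API.
fn alloc_example_buffers() {
    // Formerly `Frame`: a single page of untyped memory.
    let frame: UntypedFrame = FrameAllocOptions::new(1).alloc_single().unwrap();
    // Formerly `Segment`: a range of physically contiguous untyped pages.
    let segment: UntypedSegment = FrameAllocOptions::new(4).alloc_contiguous().unwrap();
    // Untyped memory is read and written through the `VmIo` trait.
    segment.write_bytes(0, &[0u8; 8]).unwrap();
    drop((frame, segment));
}
```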


@ -5,8 +5,8 @@ use bitvec::array::BitArray;
use int_to_c_enum::TryFromInt; use int_to_c_enum::TryFromInt;
use ostd::{ use ostd::{
mm::{ mm::{
DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, Segment, VmIo, DmaDirection, DmaStream, DmaStreamSlice, FrameAllocOptions, Infallible, UntypedSegment,
VmReader, VmWriter, VmIo, VmReader, VmWriter,
}, },
sync::{SpinLock, WaitQueue}, sync::{SpinLock, WaitQueue},
Error, Error,
@ -442,8 +442,8 @@ impl<'a> BioSegment {
} }
} }
/// Constructs a new `BioSegment` with a given `Segment` and the bio direction. /// Constructs a new `BioSegment` with a given `UntypedSegment` and the bio direction.
pub fn new_from_segment(segment: Segment, direction: BioDirection) -> Self { pub fn new_from_segment(segment: UntypedSegment, direction: BioDirection) -> Self {
let len = segment.nbytes(); let len = segment.nbytes();
let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap(); let dma_stream = DmaStream::map(segment, direction.into(), false).unwrap();
Self { Self {
@ -481,7 +481,7 @@ impl<'a> BioSegment {
/// Returns the inner VM segment. /// Returns the inner VM segment.
#[cfg(ktest)] #[cfg(ktest)]
pub fn inner_segment(&self) -> &Segment { pub fn inner_segment(&self) -> &UntypedSegment {
self.inner.dma_slice.stream().vm_segment() self.inner.dma_slice.stream().vm_segment()
} }
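
The file systems later in this diff hand single frames to the block layer by first converting them into one-page segments. A hedged sketch of that path (the `aster_block::bio` import path is assumed, as the hunk above does not show it):

```rust
use aster_block::bio::{BioDirection, BioSegment}; // import path assumed
use ostd::mm::{UntypedFrame, UntypedSegment};

// Illustrative helper mirroring the `frame.clone().into()` call sites in the
// ext2 hunks below: wrap one frame for a device-to-memory transfer.
fn frame_to_bio_segment(frame: &UntypedFrame) -> BioSegment {
    let segment: UntypedSegment = frame.clone().into();
    BioSegment::new_from_segment(segment, BioDirection::FromDevice)
}
```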


@ -76,7 +76,7 @@ impl VirtQueue {
} }
let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() { let (descriptor_ptr, avail_ring_ptr, used_ring_ptr) = if transport.is_legacy_version() {
// Currently, we use one Frame to place the descriptors and available rings, one Frame to place used rings // Currently, we use one UntypedFrame to place the descriptors and available rings, one UntypedFrame to place used rings
// because the virtio-mmio legacy required the address to be continuous. The max queue size is 128. // because the virtio-mmio legacy required the address to be continuous. The max queue size is 128.
if size > 128 { if size > 128 {
return Err(QueueError::InvalidArgs); return Err(QueueError::InvalidArgs);


@ -54,7 +54,7 @@ use ostd::{
/// ///
/// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo` /// The generic parameter `M` of `SafePtr<_, M, _>` must implement the `VmIo`
/// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and /// trait. The most important `VmIo` types are `Vmar`, `Vmo`, `IoMem`, and
/// `Frame`. The blanket implementations of `VmIo` also include pointer-like /// `UntypedFrame`. The blanket implementations of `VmIo` also include pointer-like
/// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmar>`, /// types that refer to a `VmIo` type. Some examples are `&Vmo`, `Box<Vmar>`,
/// and `Arc<IoMem>`. /// and `Arc<IoMem>`.
/// ///


@ -2,41 +2,41 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
//! Provides [`SegmentSlice`] for quick duplication and slicing over [`Segment`]. //! Provides [`SegmentSlice`] for quick duplication and slicing over [`UntypedSegment`].
use alloc::sync::Arc; use alloc::sync::Arc;
use core::ops::Range; use core::ops::Range;
use ostd::{ use ostd::{
mm::{ mm::{
FallibleVmRead, FallibleVmWrite, Frame, Infallible, Paddr, Segment, VmIo, VmReader, FallibleVmRead, FallibleVmWrite, Infallible, Paddr, UntypedFrame, UntypedSegment, VmIo,
VmWriter, PAGE_SIZE, VmReader, VmWriter, PAGE_SIZE,
}, },
Error, Result, Error, Result,
}; };
/// A reference to a slice of a [`Segment`]. /// A reference to a slice of a [`UntypedSegment`].
/// ///
/// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference /// Cloning a [`SegmentSlice`] is cheap, as it only increments one reference
/// count. While cloning a [`Segment`] will increment the reference count of /// count. While cloning a [`UntypedSegment`] will increment the reference count of
/// many underlying pages. /// many underlying pages.
/// ///
/// The downside is that the [`SegmentSlice`] requires heap allocation. Also, /// The downside is that the [`SegmentSlice`] requires heap allocation. Also,
/// if any [`SegmentSlice`] of the original [`Segment`] is alive, all pages in /// if any [`SegmentSlice`] of the original [`UntypedSegment`] is alive, all pages in
/// the original [`Segment`], including the pages that are not referenced, will /// the original [`UntypedSegment`], including the pages that are not referenced, will
/// not be freed. /// not be freed.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct SegmentSlice { pub struct SegmentSlice {
inner: Arc<Segment>, inner: Arc<UntypedSegment>,
range: Range<usize>, range: Range<usize>,
} }
impl SegmentSlice { impl SegmentSlice {
/// Returns a part of the `Segment`. /// Returns a part of the `UntypedSegment`.
/// ///
/// # Panics /// # Panics
/// ///
/// If `range` is not within the range of this `Segment`, /// If `range` is not within the range of this `UntypedSegment`,
/// then the method panics. /// then the method panics.
pub fn range(&self, range: Range<usize>) -> Self { pub fn range(&self, range: Range<usize>) -> Self {
let orig_range = &self.range; let orig_range = &self.range;
@ -124,8 +124,8 @@ impl VmIo for SegmentSlice {
} }
} }
impl From<Segment> for SegmentSlice { impl From<UntypedSegment> for SegmentSlice {
fn from(segment: Segment) -> Self { fn from(segment: UntypedSegment) -> Self {
let range = 0..segment.nbytes() / PAGE_SIZE; let range = 0..segment.nbytes() / PAGE_SIZE;
Self { Self {
inner: Arc::new(segment), inner: Arc::new(segment),
@ -134,7 +134,7 @@ impl From<Segment> for SegmentSlice {
} }
} }
impl From<SegmentSlice> for Segment { impl From<SegmentSlice> for UntypedSegment {
fn from(slice: SegmentSlice) -> Self { fn from(slice: SegmentSlice) -> Self {
let start = slice.range.start * PAGE_SIZE; let start = slice.range.start * PAGE_SIZE;
let end = slice.range.end * PAGE_SIZE; let end = slice.range.end * PAGE_SIZE;
@ -142,8 +142,8 @@ impl From<SegmentSlice> for Segment {
} }
} }
impl From<Frame> for SegmentSlice { impl From<UntypedFrame> for SegmentSlice {
fn from(frame: Frame) -> Self { fn from(frame: UntypedFrame) -> Self {
SegmentSlice::from(Segment::from(frame)) SegmentSlice::from(UntypedSegment::from(frame))
} }
} }
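
A hedged usage sketch of the slicing API above. Per the `From<UntypedSegment>` impl, `range` is expressed in pages; the crate and module hosting `SegmentSlice` are assumed here, since the hunk does not show the file path.

```rust
use aster_util::segment_slice::SegmentSlice; // path assumed
use ostd::mm::FrameAllocOptions;

// Illustrative: one heap allocation (the `Arc`) up front, then duplication and
// sub-slicing only bump a single reference count.
fn split_into_halves() {
    let segment = FrameAllocOptions::new(4).alloc_contiguous().unwrap();
    let slice = SegmentSlice::from(segment);
    let first_half = slice.range(0..2); // pages 0 and 1
    let second_half = slice.range(2..4); // pages 2 and 3
    drop((first_half, second_half));
}
```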


@ -12,7 +12,7 @@ use aster_block::{
}; };
use hashbrown::HashMap; use hashbrown::HashMap;
use lru::LruCache; use lru::LruCache;
use ostd::mm::Frame; use ostd::mm::UntypedFrame;
pub(super) use ostd::mm::VmIo; pub(super) use ostd::mm::VmIo;
use super::{ use super::{
@ -368,7 +368,7 @@ impl ExfatFS {
} }
impl PageCacheBackend for ExfatFS { impl PageCacheBackend for ExfatFS {
fn read_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
if self.fs_size() < idx * PAGE_SIZE { if self.fs_size() < idx * PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "invalid read size") return_errno_with_message!(Errno::EINVAL, "invalid read size")
} }
@ -380,7 +380,7 @@ impl PageCacheBackend for ExfatFS {
Ok(waiter) Ok(waiter)
} }
fn write_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
if self.fs_size() < idx * PAGE_SIZE { if self.fs_size() < idx * PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "invalid write size") return_errno_with_message!(Errno::EINVAL, "invalid write size")
} }


@ -13,7 +13,7 @@ use aster_block::{
BLOCK_SIZE, BLOCK_SIZE,
}; };
use aster_rights::Full; use aster_rights::Full;
use ostd::mm::{Frame, VmIo}; use ostd::mm::{UntypedFrame, VmIo};
use super::{ use super::{
constants::*, constants::*,
@ -135,7 +135,7 @@ struct ExfatInodeInner {
} }
impl PageCacheBackend for ExfatInode { impl PageCacheBackend for ExfatInode {
fn read_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
let inner = self.inner.read(); let inner = self.inner.read();
if inner.size < idx * PAGE_SIZE { if inner.size < idx * PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "Invalid read size") return_errno_with_message!(Errno::EINVAL, "Invalid read size")
@ -150,7 +150,7 @@ impl PageCacheBackend for ExfatInode {
Ok(waiter) Ok(waiter)
} }
fn write_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
let inner = self.inner.read(); let inner = self.inner.read();
let sector_size = inner.fs().sector_size(); let sector_size = inner.fs().sector_size();


@ -22,7 +22,7 @@ mod test {
BlockDevice, BlockDeviceMeta, BlockDevice, BlockDeviceMeta,
}; };
use ostd::{ use ostd::{
mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE}, mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE},
prelude::*, prelude::*,
}; };
use rand::{rngs::SmallRng, RngCore, SeedableRng}; use rand::{rngs::SmallRng, RngCore, SeedableRng};
@ -40,10 +40,10 @@ mod test {
/// Followings are implementations of memory simulated block device /// Followings are implementations of memory simulated block device
pub const SECTOR_SIZE: usize = 512; pub const SECTOR_SIZE: usize = 512;
struct ExfatMemoryBioQueue(Segment); struct ExfatMemoryBioQueue(UntypedSegment);
impl ExfatMemoryBioQueue { impl ExfatMemoryBioQueue {
pub fn new(segment: Segment) -> Self { pub fn new(segment: UntypedSegment) -> Self {
ExfatMemoryBioQueue(segment) ExfatMemoryBioQueue(segment)
} }
@ -57,7 +57,7 @@ mod test {
} }
impl ExfatMemoryDisk { impl ExfatMemoryDisk {
pub fn new(segment: Segment) -> Self { pub fn new(segment: UntypedSegment) -> Self {
ExfatMemoryDisk { ExfatMemoryDisk {
queue: ExfatMemoryBioQueue::new(segment), queue: ExfatMemoryBioQueue::new(segment),
} }
@ -111,7 +111,7 @@ mod test {
static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../test/build/exfat.img"); static EXFAT_IMAGE: &[u8] = include_bytes!("../../../../test/build/exfat.img");
/// Read exfat disk image /// Read exfat disk image
fn new_vm_segment_from_image() -> Segment { fn new_vm_segment_from_image() -> UntypedSegment {
let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE)) let vm_segment = FrameAllocOptions::new(EXFAT_IMAGE.len().div_ceil(PAGE_SIZE))
.uninit(true) .uninit(true)
.alloc_contiguous() .alloc_contiguous()


@ -28,7 +28,7 @@ struct BlockGroupImpl {
impl BlockGroup { impl BlockGroup {
/// Loads and constructs a block group. /// Loads and constructs a block group.
pub fn load( pub fn load(
group_descriptors_segment: &Segment, group_descriptors_segment: &UntypedSegment,
idx: usize, idx: usize,
block_device: &dyn BlockDevice, block_device: &dyn BlockDevice,
super_block: &SuperBlock, super_block: &SuperBlock,
@ -318,7 +318,7 @@ impl Debug for BlockGroup {
} }
impl PageCacheBackend for BlockGroupImpl { impl PageCacheBackend for BlockGroupImpl {
fn read_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
let bid = self.inode_table_bid + idx as Ext2Bid; let bid = self.inode_table_bid + idx as Ext2Bid;
let bio_segment = let bio_segment =
BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice); BioSegment::new_from_segment(frame.clone().into(), BioDirection::FromDevice);
@ -328,7 +328,7 @@ impl PageCacheBackend for BlockGroupImpl {
.read_blocks_async(bid, bio_segment) .read_blocks_async(bid, bio_segment)
} }
fn write_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
let bid = self.inode_table_bid + idx as Ext2Bid; let bid = self.inode_table_bid + idx as Ext2Bid;
let bio_segment = let bio_segment =
BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice); BioSegment::new_from_segment(frame.clone().into(), BioDirection::ToDevice);


@ -23,7 +23,7 @@ pub struct Ext2 {
blocks_per_group: Ext2Bid, blocks_per_group: Ext2Bid,
inode_size: usize, inode_size: usize,
block_size: usize, block_size: usize,
group_descriptors_segment: Segment, group_descriptors_segment: UntypedSegment,
self_ref: Weak<Self>, self_ref: Weak<Self>,
} }
@ -63,7 +63,7 @@ impl Ext2 {
// Load the block groups information // Load the block groups information
let load_block_groups = |fs: Weak<Ext2>, let load_block_groups = |fs: Weak<Ext2>,
block_device: &dyn BlockDevice, block_device: &dyn BlockDevice,
group_descriptors_segment: &Segment| group_descriptors_segment: &UntypedSegment|
-> Result<Vec<BlockGroup>> { -> Result<Vec<BlockGroup>> {
let block_groups_count = super_block.block_groups_count() as usize; let block_groups_count = super_block.block_groups_count() as usize;
let mut block_groups = Vec::with_capacity(block_groups_count); let mut block_groups = Vec::with_capacity(block_groups_count);


@ -132,7 +132,7 @@ impl IndirectBlockCache {
/// Represents a single indirect block buffer cached by the `IndirectCache`. /// Represents a single indirect block buffer cached by the `IndirectCache`.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct IndirectBlock { pub struct IndirectBlock {
frame: Frame, frame: UntypedFrame,
state: State, state: State,
} }


@ -1733,7 +1733,7 @@ impl InodeImpl {
writer: &mut VmWriter, writer: &mut VmWriter,
) -> Result<BioWaiter>; ) -> Result<BioWaiter>;
pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>; pub fn read_blocks(&self, bid: Ext2Bid, nblocks: usize, writer: &mut VmWriter) -> Result<()>;
pub fn read_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter>; pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter>;
pub fn write_blocks_async( pub fn write_blocks_async(
&self, &self,
bid: Ext2Bid, bid: Ext2Bid,
@ -1741,7 +1741,7 @@ impl InodeImpl {
reader: &mut VmReader, reader: &mut VmReader,
) -> Result<BioWaiter>; ) -> Result<BioWaiter>;
pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>; pub fn write_blocks(&self, bid: Ext2Bid, nblocks: usize, reader: &mut VmReader) -> Result<()>;
pub fn write_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter>; pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter>;
} }
/// Manages the inode blocks and block I/O operations. /// Manages the inode blocks and block I/O operations.
@ -1789,7 +1789,7 @@ impl InodeBlockManager {
} }
} }
pub fn read_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter> { pub fn read_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter> {
let mut bio_waiter = BioWaiter::new(); let mut bio_waiter = BioWaiter::new();
for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? { for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@ -1834,7 +1834,7 @@ impl InodeBlockManager {
} }
} }
pub fn write_block_async(&self, bid: Ext2Bid, frame: &Frame) -> Result<BioWaiter> { pub fn write_block_async(&self, bid: Ext2Bid, frame: &UntypedFrame) -> Result<BioWaiter> {
let mut bio_waiter = BioWaiter::new(); let mut bio_waiter = BioWaiter::new();
for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? { for dev_range in DeviceRangeReader::new(self, bid..bid + 1 as Ext2Bid)? {
@ -1858,12 +1858,12 @@ impl InodeBlockManager {
} }
impl PageCacheBackend for InodeBlockManager { impl PageCacheBackend for InodeBlockManager {
fn read_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
let bid = idx as Ext2Bid; let bid = idx as Ext2Bid;
self.read_block_async(bid, frame) self.read_block_async(bid, frame)
} }
fn write_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter> { fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
let bid = idx as Ext2Bid; let bid = idx as Ext2Bid;
self.write_block_async(bid, frame) self.write_block_async(bid, frame)
} }


@ -13,7 +13,7 @@ pub(super) use aster_block::{
}; };
pub(super) use aster_rights::Full; pub(super) use aster_rights::Full;
pub(super) use ostd::{ pub(super) use ostd::{
mm::{Frame, FrameAllocOptions, Segment, VmIo}, mm::{FrameAllocOptions, UntypedFrame, UntypedSegment, VmIo},
sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard}, sync::{RwMutex, RwMutexReadGuard, RwMutexWriteGuard},
}; };
pub(super) use static_assertions::const_assert; pub(super) use static_assertions::const_assert;


@ -11,7 +11,7 @@ use aster_rights::Full;
use aster_util::slot_vec::SlotVec; use aster_util::slot_vec::SlotVec;
use hashbrown::HashMap; use hashbrown::HashMap;
use ostd::{ use ostd::{
mm::{Frame, VmIo}, mm::{UntypedFrame, VmIo},
sync::{PreemptDisabled, RwLockWriteGuard}, sync::{PreemptDisabled, RwLockWriteGuard},
}; };
@ -484,7 +484,7 @@ impl RamInode {
} }
impl PageCacheBackend for RamInode { impl PageCacheBackend for RamInode {
fn read_page_async(&self, _idx: usize, frame: &Frame) -> Result<BioWaiter> { fn read_page_async(&self, _idx: usize, frame: &UntypedFrame) -> Result<BioWaiter> {
// Initially, any block/page in a RamFs inode contains all zeros // Initially, any block/page in a RamFs inode contains all zeros
frame frame
.writer() .writer()
@ -494,7 +494,7 @@ impl PageCacheBackend for RamInode {
Ok(BioWaiter::new()) Ok(BioWaiter::new())
} }
fn write_page_async(&self, _idx: usize, _frame: &Frame) -> Result<BioWaiter> { fn write_page_async(&self, _idx: usize, _frame: &UntypedFrame) -> Result<BioWaiter> {
// do nothing // do nothing
Ok(BioWaiter::new()) Ok(BioWaiter::new())
} }


@ -8,7 +8,7 @@ use align_ext::AlignExt;
use aster_block::bio::{BioStatus, BioWaiter}; use aster_block::bio::{BioStatus, BioWaiter};
use aster_rights::Full; use aster_rights::Full;
use lru::LruCache; use lru::LruCache;
use ostd::mm::{Frame, FrameAllocOptions, VmIo}; use ostd::mm::{FrameAllocOptions, UntypedFrame, VmIo};
use crate::{ use crate::{
prelude::*, prelude::*,
@ -381,7 +381,7 @@ impl PageCacheManager {
Ok(()) Ok(())
} }
fn ondemand_readahead(&self, idx: usize) -> Result<Frame> { fn ondemand_readahead(&self, idx: usize) -> Result<UntypedFrame> {
let mut pages = self.pages.lock(); let mut pages = self.pages.lock();
let mut ra_state = self.ra_state.lock(); let mut ra_state = self.ra_state.lock();
let backend = self.backend(); let backend = self.backend();
@ -438,7 +438,7 @@ impl Debug for PageCacheManager {
} }
impl Pager for PageCacheManager { impl Pager for PageCacheManager {
fn commit_page(&self, idx: usize) -> Result<Frame> { fn commit_page(&self, idx: usize) -> Result<UntypedFrame> {
self.ondemand_readahead(idx) self.ondemand_readahead(idx)
} }
@ -469,7 +469,7 @@ impl Pager for PageCacheManager {
Ok(()) Ok(())
} }
fn commit_overwrite(&self, idx: usize) -> Result<Frame> { fn commit_overwrite(&self, idx: usize) -> Result<UntypedFrame> {
if let Some(page) = self.pages.lock().get(&idx) { if let Some(page) = self.pages.lock().get(&idx) {
return Ok(page.frame.clone()); return Ok(page.frame.clone());
} }
@ -481,7 +481,7 @@ impl Pager for PageCacheManager {
#[derive(Debug)] #[derive(Debug)]
struct Page { struct Page {
frame: Frame, frame: UntypedFrame,
state: PageState, state: PageState,
} }
@ -502,7 +502,7 @@ impl Page {
}) })
} }
pub fn frame(&self) -> &Frame { pub fn frame(&self) -> &UntypedFrame {
&self.frame &self.frame
} }
@ -531,16 +531,16 @@ enum PageState {
/// This trait represents the backend for the page cache. /// This trait represents the backend for the page cache.
pub trait PageCacheBackend: Sync + Send { pub trait PageCacheBackend: Sync + Send {
/// Reads a page from the backend asynchronously. /// Reads a page from the backend asynchronously.
fn read_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter>; fn read_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter>;
/// Writes a page to the backend asynchronously. /// Writes a page to the backend asynchronously.
fn write_page_async(&self, idx: usize, frame: &Frame) -> Result<BioWaiter>; fn write_page_async(&self, idx: usize, frame: &UntypedFrame) -> Result<BioWaiter>;
/// Returns the number of pages in the backend. /// Returns the number of pages in the backend.
fn npages(&self) -> usize; fn npages(&self) -> usize;
} }
impl dyn PageCacheBackend { impl dyn PageCacheBackend {
/// Reads a page from the backend synchronously. /// Reads a page from the backend synchronously.
fn read_page(&self, idx: usize, frame: &Frame) -> Result<()> { fn read_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> {
let waiter = self.read_page_async(idx, frame)?; let waiter = self.read_page_async(idx, frame)?;
match waiter.wait() { match waiter.wait() {
Some(BioStatus::Complete) => Ok(()), Some(BioStatus::Complete) => Ok(()),
@ -548,7 +548,7 @@ impl dyn PageCacheBackend {
} }
} }
/// Writes a page to the backend synchronously. /// Writes a page to the backend synchronously.
fn write_page(&self, idx: usize, frame: &Frame) -> Result<()> { fn write_page(&self, idx: usize, frame: &UntypedFrame) -> Result<()> {
let waiter = self.write_page_async(idx, frame)?; let waiter = self.write_page_async(idx, frame)?;
match waiter.wait() { match waiter.wait() {
Some(BioStatus::Complete) => Ok(()), Some(BioStatus::Complete) => Ok(()),


@ -8,12 +8,12 @@ use core::{
use align_ext::AlignExt; use align_ext::AlignExt;
use inherit_methods_macro::inherit_methods; use inherit_methods_macro::inherit_methods;
use ostd::mm::{FrameAllocOptions, Segment, VmIo}; use ostd::mm::{FrameAllocOptions, UntypedSegment, VmIo};
use super::{MultiRead, MultiWrite}; use super::{MultiRead, MultiWrite};
use crate::prelude::*; use crate::prelude::*;
/// A lock-free SPSC FIFO ring buffer backed by a [`Segment`]. /// A lock-free SPSC FIFO ring buffer backed by a [`UntypedSegment`].
/// ///
/// The ring buffer supports `push`/`pop` any `T: Pod` items, also /// The ring buffer supports `push`/`pop` any `T: Pod` items, also
/// supports `write`/`read` any bytes data based on [`VmReader`]/[`VmWriter`]. /// supports `write`/`read` any bytes data based on [`VmReader`]/[`VmWriter`].
@ -46,7 +46,7 @@ use crate::prelude::*;
/// } /// }
/// ``` /// ```
pub struct RingBuffer<T> { pub struct RingBuffer<T> {
segment: Segment, segment: UntypedSegment,
capacity: usize, capacity: usize,
tail: AtomicUsize, tail: AtomicUsize,
head: AtomicUsize, head: AtomicUsize,


@ -21,7 +21,7 @@ use aster_rights::Rights;
use aster_time::{read_monotonic_time, Instant}; use aster_time::{read_monotonic_time, Instant};
use aster_util::coeff::Coeff; use aster_util::coeff::Coeff;
use ostd::{ use ostd::{
mm::{Frame, VmIo, PAGE_SIZE}, mm::{UntypedFrame, VmIo, PAGE_SIZE},
sync::SpinLock, sync::SpinLock,
Pod, Pod,
}; };
@ -199,9 +199,9 @@ struct Vdso {
data: SpinLock<VdsoData>, data: SpinLock<VdsoData>,
/// The VMO of the entire VDSO, including the library text and the VDSO data. /// The VMO of the entire VDSO, including the library text and the VDSO data.
vmo: Arc<Vmo>, vmo: Arc<Vmo>,
/// The `Frame` that contains the VDSO data. This frame is contained in and /// The `UntypedFrame` that contains the VDSO data. This frame is contained in and
/// will not be removed from the VDSO VMO. /// will not be removed from the VDSO VMO.
data_frame: Frame, data_frame: UntypedFrame,
} }
/// A `SpinLock` for the `seq` field in `VdsoData`. /// A `SpinLock` for the `seq` field in `VdsoData`.


@ -1,11 +1,11 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
use ostd::mm::{Frame, FrameAllocOptions}; use ostd::mm::{FrameAllocOptions, UntypedFrame};
use crate::prelude::*; use crate::prelude::*;
/// Creates a new `Frame` and initializes it with the contents of the `src`. /// Creates a new `UntypedFrame` and initializes it with the contents of the `src`.
pub fn duplicate_frame(src: &Frame) -> Result<Frame> { pub fn duplicate_frame(src: &UntypedFrame) -> Result<UntypedFrame> {
let new_frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?; let new_frame = FrameAllocOptions::new(1).uninit(true).alloc_single()?;
new_frame.copy_from(src); new_frame.copy_from(src);
Ok(new_frame) Ok(new_frame)
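
A small round-trip sketch of the helper above (assuming the kernel prelude's `Result` is in scope; the byte values are arbitrary):

```rust
use ostd::mm::{UntypedFrame, VmIo};

// Illustrative: the duplicate is a byte-for-byte copy of the source frame.
fn demo(src: &UntypedFrame) -> Result<()> {
    src.write_bytes(0, b"hello").unwrap();
    let copy = duplicate_frame(src)?;
    let mut buf = [0u8; 5];
    copy.read_bytes(0, &mut buf).unwrap();
    assert_eq!(&buf, b"hello");
    Ok(())
}
```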


@ -8,8 +8,8 @@ use core::{
use align_ext::AlignExt; use align_ext::AlignExt;
use ostd::mm::{ use ostd::mm::{
tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, Frame, FrameAllocOptions, PageFlags, tlb::TlbFlushOp, vm_space::VmItem, CachePolicy, FrameAllocOptions, PageFlags, PageProperty,
PageProperty, VmSpace, UntypedFrame, VmSpace,
}; };
use super::interval_set::Interval; use super::interval_set::Interval;
@ -216,7 +216,7 @@ impl VmMapping {
Ok(()) Ok(())
} }
fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(Frame, bool)> { fn prepare_page(&self, page_fault_addr: Vaddr, write: bool) -> Result<(UntypedFrame, bool)> {
let mut is_readonly = false; let mut is_readonly = false;
let Some(vmo) = &self.vmo else { let Some(vmo) = &self.vmo else {
return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly)); return Ok((FrameAllocOptions::new(1).alloc_single()?, is_readonly));
@ -264,7 +264,7 @@ impl VmMapping {
let vm_perms = self.perms - VmPerms::WRITE; let vm_perms = self.perms - VmPerms::WRITE;
let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?; let mut cursor = vm_space.cursor_mut(&(start_addr..end_addr))?;
let operate = move |commit_fn: &mut dyn FnMut() -> Result<Frame>| { let operate = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
if let VmItem::NotMapped { .. } = cursor.query().unwrap() { if let VmItem::NotMapped { .. } = cursor.query().unwrap() {
// We regard all the surrounding pages as accessed, no matter // We regard all the surrounding pages as accessed, no matter
// if it is really so. Then the hardware won't bother to update // if it is really so. Then the hardware won't bother to update
@ -432,7 +432,7 @@ impl MappedVmo {
/// ///
/// If the VMO has not committed a frame at this index, it will commit /// If the VMO has not committed a frame at this index, it will commit
/// one first and return it. /// one first and return it.
fn get_committed_frame(&self, page_offset: usize) -> Result<Frame> { fn get_committed_frame(&self, page_offset: usize) -> Result<UntypedFrame> {
debug_assert!(page_offset < self.range.len()); debug_assert!(page_offset < self.range.len());
debug_assert!(page_offset % PAGE_SIZE == 0); debug_assert!(page_offset % PAGE_SIZE == 0);
self.vmo.commit_page(self.range.start + page_offset) self.vmo.commit_page(self.range.start + page_offset)
@ -444,7 +444,7 @@ impl MappedVmo {
/// perform other operations. /// perform other operations.
fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()> fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where where
F: FnMut(&mut dyn FnMut() -> Result<Frame>) -> Result<()>, F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
{ {
debug_assert!(range.start < self.range.len()); debug_assert!(range.start < self.range.len());
debug_assert!(range.end <= self.range.len()); debug_assert!(range.end <= self.range.len());


@ -3,14 +3,14 @@
use core::ops::Range; use core::ops::Range;
use aster_rights::{Rights, TRights}; use aster_rights::{Rights, TRights};
use ostd::mm::{Frame, VmIo}; use ostd::mm::{UntypedFrame, VmIo};
use super::{CommitFlags, Vmo, VmoRightsOp}; use super::{CommitFlags, Vmo, VmoRightsOp};
use crate::prelude::*; use crate::prelude::*;
impl Vmo<Rights> { impl Vmo<Rights> {
/// Commits a page at specific offset /// Commits a page at specific offset
pub fn commit_page(&self, offset: usize) -> Result<Frame> { pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
self.check_rights(Rights::WRITE)?; self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset) self.0.commit_page(offset)
} }
@ -39,7 +39,7 @@ impl Vmo<Rights> {
/// perform other operations. /// perform other operations.
pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()> pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where where
F: FnMut(&mut dyn FnMut() -> Result<Frame>) -> Result<()>, F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
{ {
self.check_rights(Rights::WRITE)?; self.check_rights(Rights::WRITE)?;
self.0 self.0
@ -112,7 +112,7 @@ impl Vmo<Rights> {
/// # Access rights /// # Access rights
/// ///
/// The method requires the Write right. /// The method requires the Write right.
pub fn replace(&self, page: Frame, page_idx: usize) -> Result<()> { pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
self.check_rights(Rights::WRITE)?; self.check_rights(Rights::WRITE)?;
self.0.replace(page, page_idx) self.0.replace(page, page_idx)
} }
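
The callback style of `operate_on_range` is easiest to see with a trivial operation. A sketch, assuming the kernel prelude (`Result`, `PAGE_SIZE`, `Vmo`) is in scope; note that the method is `pub(in crate::vm)`, so this only compiles inside the kernel's `vm` module, and committing requires the Write right:

```rust
use aster_rights::Rights;
use ostd::mm::UntypedFrame;

// Illustrative: commit (force allocation of) every page in the first 16 KiB of
// a VMO. The closure runs once per page; calling `commit_fn` yields that page.
fn commit_first_pages(vmo: &Vmo<Rights>) -> Result<()> {
    vmo.operate_on_range(&(0..4 * PAGE_SIZE), |commit_fn| {
        let _frame: UntypedFrame = commit_fn()?;
        Ok(())
    })
}
```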


@ -11,7 +11,7 @@ use align_ext::AlignExt;
use aster_rights::Rights; use aster_rights::Rights;
use ostd::{ use ostd::{
collections::xarray::{CursorMut, XArray}, collections::xarray::{CursorMut, XArray},
mm::{Frame, FrameAllocOptions, VmReader, VmWriter}, mm::{FrameAllocOptions, UntypedFrame, VmReader, VmWriter},
}; };
use crate::prelude::*; use crate::prelude::*;
@ -66,8 +66,8 @@ pub use pager::Pager;
/// # Implementation /// # Implementation
/// ///
/// `Vmo` provides high-level APIs for address space management by wrapping /// `Vmo` provides high-level APIs for address space management by wrapping
/// around its low-level counterpart [`ostd::mm::Frame`]. /// around its low-level counterpart [`ostd::mm::UntypedFrame`].
/// Compared with `Frame`, /// Compared with `UntypedFrame`,
/// `Vmo` is easier to use (by offering more powerful APIs) and /// `Vmo` is easier to use (by offering more powerful APIs) and
/// harder to misuse (thanks to its nature of being capability). /// harder to misuse (thanks to its nature of being capability).
#[derive(Debug)] #[derive(Debug)]
@ -125,12 +125,12 @@ bitflags! {
} }
} }
/// `Pages` is the struct that manages the `Frame`s stored in `Vmo_`. /// `Pages` is the struct that manages the `UntypedFrame`s stored in `Vmo_`.
pub(super) enum Pages { pub(super) enum Pages {
/// `Pages` that cannot be resized. This kind of `Pages` will have a constant size. /// `Pages` that cannot be resized. This kind of `Pages` will have a constant size.
Nonresizable(Mutex<XArray<Frame>>, usize), Nonresizable(Mutex<XArray<UntypedFrame>>, usize),
/// `Pages` that can be resized and have a variable size. /// `Pages` that can be resized and have a variable size.
Resizable(Mutex<(XArray<Frame>, usize)>), Resizable(Mutex<(XArray<UntypedFrame>, usize)>),
} }
impl Clone for Pages { impl Clone for Pages {
@ -149,7 +149,7 @@ impl Clone for Pages {
impl Pages { impl Pages {
fn with<R, F>(&self, func: F) -> R fn with<R, F>(&self, func: F) -> R
where where
F: FnOnce(&mut XArray<Frame>, usize) -> R, F: FnOnce(&mut XArray<UntypedFrame>, usize) -> R,
{ {
match self { match self {
Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size), Self::Nonresizable(pages, size) => func(&mut pages.lock(), *size),
@ -201,16 +201,16 @@ impl CommitFlags {
} }
impl Vmo_ { impl Vmo_ {
/// Prepares a new `Frame` for the target index in pages, returns this new frame. /// Prepares a new `UntypedFrame` for the target index in pages, returns this new frame.
fn prepare_page(&self, page_idx: usize) -> Result<Frame> { fn prepare_page(&self, page_idx: usize) -> Result<UntypedFrame> {
match &self.pager { match &self.pager {
None => Ok(FrameAllocOptions::new(1).alloc_single()?), None => Ok(FrameAllocOptions::new(1).alloc_single()?),
Some(pager) => pager.commit_page(page_idx), Some(pager) => pager.commit_page(page_idx),
} }
} }
/// Prepares a new `Frame` for the target index in the VMO, returns this new frame. /// Prepares a new `UntypedFrame` for the target index in the VMO, returns this new frame.
fn prepare_overwrite(&self, page_idx: usize) -> Result<Frame> { fn prepare_overwrite(&self, page_idx: usize) -> Result<UntypedFrame> {
if let Some(pager) = &self.pager { if let Some(pager) = &self.pager {
pager.commit_overwrite(page_idx) pager.commit_overwrite(page_idx)
} else { } else {
@ -220,9 +220,9 @@ impl Vmo_ {
fn commit_with_cursor( fn commit_with_cursor(
&self, &self,
cursor: &mut CursorMut<'_, Frame>, cursor: &mut CursorMut<'_, UntypedFrame>,
commit_flags: CommitFlags, commit_flags: CommitFlags,
) -> Result<Frame> { ) -> Result<UntypedFrame> {
let new_page = { let new_page = {
if let Some(committed_page) = cursor.load() { if let Some(committed_page) = cursor.load() {
// Fast path: return the page directly. // Fast path: return the page directly.
@ -241,7 +241,7 @@ impl Vmo_ {
/// Commits the page corresponding to the target offset in the VMO and return that page. /// Commits the page corresponding to the target offset in the VMO and return that page.
/// If the current offset has already been committed, the page will be returned directly. /// If the current offset has already been committed, the page will be returned directly.
pub fn commit_page(&self, offset: usize) -> Result<Frame> { pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
let page_idx = offset / PAGE_SIZE; let page_idx = offset / PAGE_SIZE;
self.pages.with(|pages, size| { self.pages.with(|pages, size| {
if offset >= size { if offset >= size {
@ -279,7 +279,7 @@ impl Vmo_ {
commit_flags: CommitFlags, commit_flags: CommitFlags,
) -> Result<()> ) -> Result<()>
where where
F: FnMut(&mut dyn FnMut() -> Result<Frame>) -> Result<()>, F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
{ {
self.pages.with(|pages, size| { self.pages.with(|pages, size| {
if range.end > size { if range.end > size {
@ -315,7 +315,7 @@ impl Vmo_ {
let read_range = offset..(offset + read_len); let read_range = offset..(offset + read_len);
let mut read_offset = offset % PAGE_SIZE; let mut read_offset = offset % PAGE_SIZE;
let read = move |commit_fn: &mut dyn FnMut() -> Result<Frame>| { let read = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
let frame = commit_fn()?; let frame = commit_fn()?;
frame.reader().skip(read_offset).read_fallible(writer)?; frame.reader().skip(read_offset).read_fallible(writer)?;
read_offset = 0; read_offset = 0;
@ -331,7 +331,7 @@ impl Vmo_ {
let write_range = offset..(offset + write_len); let write_range = offset..(offset + write_len);
let mut write_offset = offset % PAGE_SIZE; let mut write_offset = offset % PAGE_SIZE;
let mut write = move |commit_fn: &mut dyn FnMut() -> Result<Frame>| { let mut write = move |commit_fn: &mut dyn FnMut() -> Result<UntypedFrame>| {
let frame = commit_fn()?; let frame = commit_fn()?;
frame.writer().skip(write_offset).write_fallible(reader)?; frame.writer().skip(write_offset).write_fallible(reader)?;
write_offset = 0; write_offset = 0;
@ -401,7 +401,7 @@ impl Vmo_ {
Ok(()) Ok(())
} }
fn decommit_pages(&self, pages: &mut XArray<Frame>, range: Range<usize>) -> Result<()> { fn decommit_pages(&self, pages: &mut XArray<UntypedFrame>, range: Range<usize>) -> Result<()> {
let page_idx_range = get_page_idx_range(&range); let page_idx_range = get_page_idx_range(&range);
let mut cursor = pages.cursor_mut(page_idx_range.start as u64); let mut cursor = pages.cursor_mut(page_idx_range.start as u64);
for page_idx in page_idx_range { for page_idx in page_idx_range {
@ -426,7 +426,7 @@ impl Vmo_ {
self.flags self.flags
} }
fn replace(&self, page: Frame, page_idx: usize) -> Result<()> { fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
self.pages.with(|pages, size| { self.pages.with(|pages, size| {
if page_idx >= size / PAGE_SIZE { if page_idx >= size / PAGE_SIZE {
return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo"); return_errno_with_message!(Errno::EINVAL, "the page index is outside of the vmo");


@ -8,7 +8,7 @@ use align_ext::AlignExt;
use aster_rights::{Rights, TRightSet, TRights}; use aster_rights::{Rights, TRightSet, TRights};
use ostd::{ use ostd::{
collections::xarray::XArray, collections::xarray::XArray,
mm::{Frame, FrameAllocOptions}, mm::{FrameAllocOptions, UntypedFrame},
}; };
use super::{Pager, Pages, Vmo, VmoFlags}; use super::{Pager, Pages, Vmo, VmoFlags};
@ -137,7 +137,7 @@ fn alloc_vmo_(size: usize, flags: VmoFlags, pager: Option<Arc<dyn Pager>>) -> Re
}) })
} }
fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<Frame>> { fn committed_pages_if_continuous(flags: VmoFlags, size: usize) -> Result<XArray<UntypedFrame>> {
if flags.contains(VmoFlags::CONTIGUOUS) { if flags.contains(VmoFlags::CONTIGUOUS) {
// if the vmo is continuous, we need to allocate frames for the vmo // if the vmo is continuous, we need to allocate frames for the vmo
let frames_num = size / PAGE_SIZE; let frames_num = size / PAGE_SIZE;


@ -1,6 +1,6 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
use ostd::mm::Frame; use ostd::mm::UntypedFrame;
use crate::prelude::*; use crate::prelude::*;
@ -26,7 +26,7 @@ pub trait Pager: Send + Sync {
/// whatever frame that may or may not be the same as the last time. /// whatever frame that may or may not be the same as the last time.
/// ///
/// It is up to the pager to decide the range of valid indices. /// It is up to the pager to decide the range of valid indices.
fn commit_page(&self, idx: usize) -> Result<Frame>; fn commit_page(&self, idx: usize) -> Result<UntypedFrame>;
/// Notify the pager that the frame at a specified index has been updated. /// Notify the pager that the frame at a specified index has been updated.
/// ///
@ -54,5 +54,5 @@ pub trait Pager: Send + Sync {
/// Ask the pager to provide a frame at a specified index. /// Ask the pager to provide a frame at a specified index.
/// Notify the pager that the frame will be fully overwritten soon, so pager can /// Notify the pager that the frame will be fully overwritten soon, so pager can
/// choose not to initialize it. /// choose not to initialize it.
fn commit_overwrite(&self, idx: usize) -> Result<Frame>; fn commit_overwrite(&self, idx: usize) -> Result<UntypedFrame>;
} }


@ -4,14 +4,14 @@ use core::ops::Range;
use aster_rights::{Dup, Rights, TRightSet, TRights, Write}; use aster_rights::{Dup, Rights, TRightSet, TRights, Write};
use aster_rights_proc::require; use aster_rights_proc::require;
use ostd::mm::{Frame, VmIo}; use ostd::mm::{UntypedFrame, VmIo};
use super::{CommitFlags, Vmo, VmoRightsOp}; use super::{CommitFlags, Vmo, VmoRightsOp};
use crate::prelude::*; use crate::prelude::*;
impl<R: TRights> Vmo<TRightSet<R>> { impl<R: TRights> Vmo<TRightSet<R>> {
/// Commits a page at specific offset. /// Commits a page at specific offset.
pub fn commit_page(&self, offset: usize) -> Result<Frame> { pub fn commit_page(&self, offset: usize) -> Result<UntypedFrame> {
self.check_rights(Rights::WRITE)?; self.check_rights(Rights::WRITE)?;
self.0.commit_page(offset) self.0.commit_page(offset)
} }
@ -41,7 +41,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
#[require(R > Write)] #[require(R > Write)]
pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()> pub(in crate::vm) fn operate_on_range<F>(&self, range: &Range<usize>, operate: F) -> Result<()>
where where
F: FnMut(&mut dyn FnMut() -> Result<Frame>) -> Result<()>, F: FnMut(&mut dyn FnMut() -> Result<UntypedFrame>) -> Result<()>,
{ {
self.0 self.0
.operate_on_range(range, operate, CommitFlags::empty()) .operate_on_range(range, operate, CommitFlags::empty())
@ -114,7 +114,7 @@ impl<R: TRights> Vmo<TRightSet<R>> {
/// ///
/// The method requires the Write right. /// The method requires the Write right.
#[require(R > Write)] #[require(R > Write)]
pub fn replace(&self, page: Frame, page_idx: usize) -> Result<()> { pub fn replace(&self, page: UntypedFrame, page_idx: usize) -> Result<()> {
self.0.replace(page, page_idx) self.0.replace(page, page_idx)
} }


@ -40,7 +40,7 @@ fn create_user_space(program: &[u8]) -> UserSpace {
.alloc_contiguous() .alloc_contiguous()
.unwrap(); .unwrap();
// Physical memory pages can be only accessed // Physical memory pages can be only accessed
// via the `Frame` or `Segment` abstraction. // via the `UntypedFrame` or `UntypedSegment` abstraction.
segment.write_bytes(0, program).unwrap(); segment.write_bytes(0, program).unwrap();
segment segment
}; };


@ -15,7 +15,7 @@ use crate::{
dma::Daddr, dma::Daddr,
page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags}, page_prop::{CachePolicy, PageProperty, PrivilegedPageFlags as PrivFlags},
page_table::{PageTableError, PageTableItem}, page_table::{PageTableError, PageTableItem},
Frame, FrameAllocOptions, Paddr, PageFlags, PageTable, VmIo, PAGE_SIZE, FrameAllocOptions, Paddr, PageFlags, PageTable, UntypedFrame, VmIo, PAGE_SIZE,
}, },
}; };
@ -38,7 +38,7 @@ impl RootEntry {
pub struct RootTable { pub struct RootTable {
/// Total 256 bus, each entry is 128 bits. /// Total 256 bus, each entry is 128 bits.
root_frame: Frame, root_frame: UntypedFrame,
// TODO: Use radix tree instead. // TODO: Use radix tree instead.
context_tables: BTreeMap<Paddr, ContextTable>, context_tables: BTreeMap<Paddr, ContextTable>,
} }
@ -236,7 +236,7 @@ pub enum AddressWidth {
pub struct ContextTable { pub struct ContextTable {
/// Total 32 devices, each device has 8 functions. /// Total 32 devices, each device has 8 functions.
entries_frame: Frame, entries_frame: UntypedFrame,
page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>, page_tables: BTreeMap<Paddr, PageTable<DeviceMode, PageTableEntry, PagingConsts>>,
} }


@ -9,7 +9,7 @@ use int_to_c_enum::TryFromInt;
use super::IrtEntryHandle; use super::IrtEntryHandle;
use crate::{ use crate::{
mm::{paddr_to_vaddr, FrameAllocOptions, Segment, PAGE_SIZE}, mm::{paddr_to_vaddr, FrameAllocOptions, UntypedSegment, PAGE_SIZE},
sync::{LocalIrqDisabled, SpinLock}, sync::{LocalIrqDisabled, SpinLock},
}; };
@ -23,7 +23,7 @@ enum ExtendedInterruptMode {
pub struct IntRemappingTable { pub struct IntRemappingTable {
size: u16, size: u16,
extended_interrupt_mode: ExtendedInterruptMode, extended_interrupt_mode: ExtendedInterruptMode,
frames: Segment, frames: UntypedSegment,
/// The global allocator for Interrupt remapping entry. /// The global allocator for Interrupt remapping entry.
allocator: SpinLock<IdAlloc, LocalIrqDisabled>, allocator: SpinLock<IdAlloc, LocalIrqDisabled>,
handles: Vec<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>>, handles: Vec<Arc<SpinLock<IrtEntryHandle, LocalIrqDisabled>>>,
@ -35,7 +35,7 @@ impl IntRemappingTable {
Some(self.handles.get(id).unwrap().clone()) Some(self.handles.get(id).unwrap().clone())
} }
/// Creates an Interrupt Remapping Table with one Frame (default). /// Creates an Interrupt Remapping Table with one UntypedFrame (default).
pub(super) fn new() -> Self { pub(super) fn new() -> Self {
const DEFAULT_PAGES: usize = 1; const DEFAULT_PAGES: usize = 1;
let segment = FrameAllocOptions::new(DEFAULT_PAGES) let segment = FrameAllocOptions::new(DEFAULT_PAGES)


@ -3,12 +3,12 @@
use core::mem::size_of; use core::mem::size_of;
use crate::{ use crate::{
mm::{FrameAllocOptions, Segment, VmIo, PAGE_SIZE}, mm::{FrameAllocOptions, UntypedSegment, VmIo, PAGE_SIZE},
prelude::Paddr, prelude::Paddr,
}; };
pub struct Queue { pub struct Queue {
segment: Segment, segment: UntypedSegment,
queue_size: usize, queue_size: usize,
tail: usize, tail: usize,
} }


@ -11,10 +11,9 @@ use crate::{
arch::boot::smp::{bringup_all_aps, get_num_processors}, arch::boot::smp::{bringup_all_aps, get_num_processors},
cpu, cpu,
mm::{ mm::{
frame::{self, Segment},
kspace::KernelMeta, kspace::KernelMeta,
paddr_to_vaddr, paddr_to_vaddr, PAGE_SIZE,
page::{self, ContPages},
PAGE_SIZE,
}, },
task::Task, task::Task,
}; };
@ -25,7 +24,7 @@ const AP_BOOT_STACK_SIZE: usize = PAGE_SIZE * 64;
pub(crate) struct ApBootInfo { pub(crate) struct ApBootInfo {
/// It holds the boot stack top pointers used by all APs. /// It holds the boot stack top pointers used by all APs.
pub(crate) boot_stack_array: ContPages<KernelMeta>, pub(crate) boot_stack_array: Segment<KernelMeta>,
/// `per_ap_info` maps each AP's ID to its associated boot information. /// `per_ap_info` maps each AP's ID to its associated boot information.
per_ap_info: BTreeMap<u32, PerApInfo>, per_ap_info: BTreeMap<u32, PerApInfo>,
} }
@ -33,10 +32,10 @@ pub(crate) struct ApBootInfo {
struct PerApInfo { struct PerApInfo {
is_started: AtomicBool, is_started: AtomicBool,
// TODO: When the AP starts up and begins executing tasks, the boot stack will // TODO: When the AP starts up and begins executing tasks, the boot stack will
// no longer be used, and the `ContPages` can be deallocated (this problem also // no longer be used, and the `Segment` can be deallocated (this problem also
// exists in the boot processor, but the memory it occupies should be returned // exists in the boot processor, but the memory it occupies should be returned
// to the frame allocator). // to the frame allocator).
boot_stack_pages: ContPages<KernelMeta>, boot_stack_pages: Segment<KernelMeta>,
} }
static AP_LATE_ENTRY: Once<fn()> = Once::new(); static AP_LATE_ENTRY: Once<fn()> = Once::new();
@ -64,12 +63,12 @@ pub fn boot_all_aps() {
let mut per_ap_info = BTreeMap::new(); let mut per_ap_info = BTreeMap::new();
// Use two pages to place stack pointers of all APs, thus support up to 1024 APs. // Use two pages to place stack pointers of all APs, thus support up to 1024 APs.
let boot_stack_array = let boot_stack_array =
page::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap(); frame::allocator::alloc_contiguous(2 * PAGE_SIZE, |_| KernelMeta::default()).unwrap();
assert!(num_cpus < 1024); assert!(num_cpus < 1024);
for ap in 1..num_cpus { for ap in 1..num_cpus {
let boot_stack_pages = let boot_stack_pages =
page::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default()) frame::allocator::alloc_contiguous(AP_BOOT_STACK_SIZE, |_| KernelMeta::default())
.unwrap(); .unwrap();
let boot_stack_ptr = paddr_to_vaddr(boot_stack_pages.end_paddr()); let boot_stack_ptr = paddr_to_vaddr(boot_stack_pages.end_paddr());
let stack_array_ptr = paddr_to_vaddr(boot_stack_array.start_paddr()) as *mut u64; let stack_array_ptr = paddr_to_vaddr(boot_stack_array.start_paddr()) as *mut u64;
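
(For reference on the 1024-AP limit mentioned above: assuming 4 KiB pages and 8-byte stack-top pointers, two pages hold 2 × 4096 / 8 = 1024 entries.)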


@ -44,10 +44,9 @@ use spin::Once;
use crate::{ use crate::{
arch, arch,
mm::{ mm::{
frame::{self, Segment},
kspace::KernelMeta, kspace::KernelMeta,
paddr_to_vaddr, paddr_to_vaddr, PAGE_SIZE,
page::{self, ContPages},
PAGE_SIZE,
}, },
}; };
@ -79,7 +78,7 @@ pub(crate) unsafe fn early_init_bsp_local_base() {
} }
/// The BSP initializes the CPU-local areas for APs. /// The BSP initializes the CPU-local areas for APs.
static CPU_LOCAL_STORAGES: Once<Vec<ContPages<KernelMeta>>> = Once::new(); static CPU_LOCAL_STORAGES: Once<Vec<Segment<KernelMeta>>> = Once::new();
/// Initializes the CPU local data for the bootstrap processor (BSP). /// Initializes the CPU local data for the bootstrap processor (BSP).
/// ///
@ -100,7 +99,7 @@ pub unsafe fn init_on_bsp() {
for _ in 1..num_cpus { for _ in 1..num_cpus {
let ap_pages = { let ap_pages = {
let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE); let nbytes = (bsp_end_va - bsp_base_va).align_up(PAGE_SIZE);
page::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap() frame::allocator::alloc_contiguous(nbytes, |_| KernelMeta::default()).unwrap()
}; };
let ap_pages_ptr = paddr_to_vaddr(ap_pages.start_paddr()) as *mut u8; let ap_pages_ptr = paddr_to_vaddr(ap_pages.start_paddr()) as *mut u8;


@ -84,7 +84,7 @@ unsafe fn init() {
boot::init(); boot::init();
logger::init(); logger::init();
mm::page::allocator::init(); mm::frame::allocator::init();
mm::kspace::init_kernel_page_table(mm::init_page_meta()); mm::kspace::init_kernel_page_table(mm::init_page_meta());
mm::dma::init(); mm::dma::init();


@ -13,7 +13,7 @@ use crate::{
io::VmIoOnce, io::VmIoOnce,
kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE}, kspace::{paddr_to_vaddr, KERNEL_PAGE_TABLE},
page_prop::CachePolicy, page_prop::CachePolicy,
HasPaddr, Infallible, Paddr, PodOnce, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE, HasPaddr, Infallible, Paddr, PodOnce, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE,
}, },
prelude::*, prelude::*,
}; };
@ -38,7 +38,7 @@ pub struct DmaCoherent {
#[derive(Debug)] #[derive(Debug)]
struct DmaCoherentInner { struct DmaCoherentInner {
vm_segment: Segment, vm_segment: UntypedSegment,
start_daddr: Daddr, start_daddr: Daddr,
is_cache_coherent: bool, is_cache_coherent: bool,
} }
@ -54,7 +54,7 @@ impl DmaCoherent {
/// The method fails if any part of the given `vm_segment` /// The method fails if any part of the given `vm_segment`
/// already belongs to a DMA mapping. /// already belongs to a DMA mapping.
pub fn map( pub fn map(
vm_segment: Segment, vm_segment: UntypedSegment,
is_cache_coherent: bool, is_cache_coherent: bool,
) -> core::result::Result<Self, DmaError> { ) -> core::result::Result<Self, DmaError> {
let frame_count = vm_segment.nbytes() / PAGE_SIZE; let frame_count = vm_segment.nbytes() / PAGE_SIZE;
@ -123,7 +123,7 @@ impl HasDaddr for DmaCoherent {
} }
impl Deref for DmaCoherent { impl Deref for DmaCoherent {
type Target = Segment; type Target = UntypedSegment;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&self.inner.vm_segment &self.inner.vm_segment
} }
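
A hedged sketch of establishing a coherent mapping with the `map` signature above (the `DmaCoherent` import path is assumed). Because `DmaCoherent` derefs to `UntypedSegment`, the mapped memory remains accessible through `VmIo` on the CPU side.

```rust
use ostd::mm::{DmaCoherent, FrameAllocOptions}; // `DmaCoherent` path assumed

// Illustrative: map one freshly allocated page for cache-coherent DMA.
fn alloc_coherent_page() -> DmaCoherent {
    let segment = FrameAllocOptions::new(1).alloc_contiguous().unwrap();
    // Fails with `DmaError` if any page of `segment` already belongs to a DMA mapping.
    DmaCoherent::map(segment, true).unwrap()
}
```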


@ -11,7 +11,7 @@ use crate::{
error::Error, error::Error,
mm::{ mm::{
dma::{dma_type, Daddr, DmaType}, dma::{dma_type, Daddr, DmaType},
HasPaddr, Infallible, Paddr, Segment, VmIo, VmReader, VmWriter, PAGE_SIZE, HasPaddr, Infallible, Paddr, UntypedSegment, VmIo, VmReader, VmWriter, PAGE_SIZE,
}, },
}; };
@ -34,7 +34,7 @@ pub struct DmaStream {
#[derive(Debug)] #[derive(Debug)]
struct DmaStreamInner { struct DmaStreamInner {
vm_segment: Segment, vm_segment: UntypedSegment,
start_daddr: Daddr, start_daddr: Daddr,
/// TODO: remove this field when on x86. /// TODO: remove this field when on x86.
#[allow(unused)] #[allow(unused)]
@ -55,11 +55,11 @@ pub enum DmaDirection {
} }
impl DmaStream { impl DmaStream {
/// Establishes DMA stream mapping for a given [`Segment`]. /// Establishes DMA stream mapping for a given [`UntypedSegment`].
/// ///
/// The method fails if the segment already belongs to a DMA mapping. /// The method fails if the segment already belongs to a DMA mapping.
pub fn map( pub fn map(
vm_segment: Segment, vm_segment: UntypedSegment,
direction: DmaDirection, direction: DmaDirection,
is_cache_coherent: bool, is_cache_coherent: bool,
) -> Result<Self, DmaError> { ) -> Result<Self, DmaError> {
@ -107,13 +107,13 @@ impl DmaStream {
}) })
} }
/// Gets the underlying [`Segment`]. /// Gets the underlying [`UntypedSegment`].
/// ///
/// Usually, the CPU side should not access the memory /// Usually, the CPU side should not access the memory
/// after the DMA mapping is established because /// after the DMA mapping is established because
/// there is a chance that the device is updating /// there is a chance that the device is updating
/// the memory. Do this at your own risk. /// the memory. Do this at your own risk.
pub fn vm_segment(&self) -> &Segment { pub fn vm_segment(&self) -> &UntypedSegment {
&self.inner.vm_segment &self.inner.vm_segment
} }
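
A companion sketch for the streaming API above (the `DmaDirection` variant name is assumed; it is not shown in this hunk):

```rust
use ostd::mm::{DmaDirection, DmaStream, FrameAllocOptions};

// Illustrative: map four contiguous pages so a device can write into them.
fn map_receive_buffer() -> DmaStream {
    let segment = FrameAllocOptions::new(4).alloc_contiguous().unwrap();
    // `false`: the mapping is not known to be cache-coherent.
    let stream = DmaStream::map(segment, DmaDirection::FromDevice, false).unwrap();
    // The CPU can still reach the memory via `stream.vm_segment()`, but doing so
    // while the device owns the buffer is, per the doc above, at one's own risk.
    stream
}
```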


@ -10,7 +10,7 @@ use buddy_system_allocator::FrameAllocator;
use log::info; use log::info;
use spin::Once; use spin::Once;
use super::{cont_pages::ContPages, meta::PageMeta, Page}; use super::{meta::FrameMeta, segment::Segment, Frame};
use crate::{ use crate::{
boot::memory_region::MemoryRegionType, boot::memory_region::MemoryRegionType,
mm::{Paddr, PAGE_SIZE}, mm::{Paddr, PAGE_SIZE},
@ -62,7 +62,7 @@ pub(in crate::mm) static PAGE_ALLOCATOR: Once<SpinLock<CountingFrameAllocator>>
/// Allocate a single page. /// Allocate a single page.
/// ///
/// The metadata of the page is initialized with the given metadata. /// The metadata of the page is initialized with the given metadata.
pub(crate) fn alloc_single<M: PageMeta>(metadata: M) -> Option<Page<M>> { pub(crate) fn alloc_single<M: FrameMeta>(metadata: M) -> Option<Frame<M>> {
PAGE_ALLOCATOR PAGE_ALLOCATOR
.get() .get()
.unwrap() .unwrap()
@ -71,7 +71,7 @@ pub(crate) fn alloc_single<M: PageMeta>(metadata: M) -> Option<Page<M>> {
.alloc(1) .alloc(1)
.map(|idx| { .map(|idx| {
let paddr = idx * PAGE_SIZE; let paddr = idx * PAGE_SIZE;
Page::from_unused(paddr, metadata) Frame::from_unused(paddr, metadata)
}) })
} }
@ -84,7 +84,7 @@ pub(crate) fn alloc_single<M: PageMeta>(metadata: M) -> Option<Page<M>> {
/// # Panics /// # Panics
/// ///
/// The function panics if the length is not base-page-aligned. /// The function panics if the length is not base-page-aligned.
pub(crate) fn alloc_contiguous<M: PageMeta, F>(len: usize, metadata_fn: F) -> Option<ContPages<M>> pub(crate) fn alloc_contiguous<M: FrameMeta, F>(len: usize, metadata_fn: F) -> Option<Segment<M>>
where where
F: FnMut(Paddr) -> M, F: FnMut(Paddr) -> M,
{ {
@ -95,9 +95,7 @@ where
.disable_irq() .disable_irq()
.lock() .lock()
.alloc(len / PAGE_SIZE) .alloc(len / PAGE_SIZE)
.map(|start| { .map(|start| Segment::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn))
ContPages::from_unused(start * PAGE_SIZE..start * PAGE_SIZE + len, metadata_fn)
})
} }
pub(crate) fn init() { pub(crate) fn init() {


@ -11,7 +11,7 @@
//! address. It is faster, simpler, safer and more versatile compared with an actual static array //! address. It is faster, simpler, safer and more versatile compared with an actual static array
//! implementation. //! implementation.
pub mod mapping { pub(crate) mod mapping {
//! The metadata of each physical page is linear mapped to fixed virtual addresses //! The metadata of each physical page is linear mapped to fixed virtual addresses
//! in [`FRAME_METADATA_RANGE`]. //! in [`FRAME_METADATA_RANGE`].
@ -21,14 +21,14 @@ pub mod mapping {
use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE}; use crate::mm::{kspace::FRAME_METADATA_RANGE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE};
/// Converts a physical address of a base page to the virtual address of the metadata slot. /// Converts a physical address of a base page to the virtual address of the metadata slot.
pub const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr { pub(crate) const fn page_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
let base = FRAME_METADATA_RANGE.start; let base = FRAME_METADATA_RANGE.start;
let offset = paddr / PAGE_SIZE; let offset = paddr / PAGE_SIZE;
base + offset * size_of::<MetaSlot>() base + offset * size_of::<MetaSlot>()
} }
/// Converts a virtual address of the metadata slot to the physical address of the page. /// Converts a virtual address of the metadata slot to the physical address of the page.
pub const fn meta_to_page<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr { pub(crate) const fn meta_to_page<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr {
let base = FRAME_METADATA_RANGE.start; let base = FRAME_METADATA_RANGE.start;
let offset = (vaddr - base) / size_of::<MetaSlot>(); let offset = (vaddr - base) / size_of::<MetaSlot>();
offset * PAGE_SIZE offset * PAGE_SIZE
@ -46,7 +46,7 @@ use align_ext::AlignExt;
use log::info; use log::info;
use static_assertions::const_assert_eq; use static_assertions::const_assert_eq;
use super::{allocator, ContPages}; use super::{allocator, Segment};
use crate::{ use crate::{
arch::mm::PagingConsts, arch::mm::PagingConsts,
mm::{ mm::{
@ -58,7 +58,7 @@ use crate::{
/// The maximum number of bytes of the metadata of a page. /// The maximum number of bytes of the metadata of a page.
pub const PAGE_METADATA_MAX_SIZE: usize = pub const PAGE_METADATA_MAX_SIZE: usize =
META_SLOT_SIZE - size_of::<AtomicU32>() - size_of::<PageMetaVtablePtr>(); META_SLOT_SIZE - size_of::<AtomicU32>() - size_of::<FrameMetaVtablePtr>();
/// The maximum alignment in bytes of the metadata of a page. /// The maximum alignment in bytes of the metadata of a page.
pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>(); pub const PAGE_METADATA_MAX_ALIGN: usize = align_of::<MetaSlot>();
@ -70,7 +70,7 @@ pub(in crate::mm) struct MetaSlot {
/// ///
/// It is placed at the beginning of a slot because: /// It is placed at the beginning of a slot because:
/// - the implementation can simply cast a `*const MetaSlot` /// - the implementation can simply cast a `*const MetaSlot`
/// to a `*const PageMeta` for manipulation; /// to a `*const FrameMeta` for manipulation;
/// - if the metadata need special alignment, we can provide /// - if the metadata need special alignment, we can provide
/// at most `PAGE_METADATA_ALIGN` bytes of alignment; /// at most `PAGE_METADATA_ALIGN` bytes of alignment;
/// - the subsequent fields can utilize the padding of the /// - the subsequent fields can utilize the padding of the
@ -79,24 +79,24 @@ pub(in crate::mm) struct MetaSlot {
/// The reference count of the page. /// The reference count of the page.
/// ///
/// Specifically, the reference count has the following meaning: /// Specifically, the reference count has the following meaning:
/// * `REF_COUNT_UNUSED`: The page is not in use. /// * `REF_COUNT_UNUSED`: The page is not in use.
/// * `0`: The page is being constructed ([`Page::from_unused`]) /// * `0`: The page is being constructed ([`Page::from_unused`])
/// or destructured ([`drop_last_in_place`]). /// or destructured ([`drop_last_in_place`]).
/// * `1..REF_COUNT_MAX`: The page is in use. /// * `1..REF_COUNT_MAX`: The page is in use.
/// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to /// * `REF_COUNT_MAX..REF_COUNT_UNUSED`: Illegal values to
/// prevent the reference count from overflowing. Otherwise, /// prevent the reference count from overflowing. Otherwise,
/// overflowing the reference count will cause soundness issue. /// overflowing the reference count will cause soundness issue.
/// ///
/// [`Page::from_unused`]: super::Page::from_unused /// [`Frame::from_unused`]: super::Frame::from_unused
pub(super) ref_count: AtomicU32, pub(super) ref_count: AtomicU32,
/// The virtual table that indicates the type of the metadata. /// The virtual table that indicates the type of the metadata.
pub(super) vtable_ptr: UnsafeCell<MaybeUninit<PageMetaVtablePtr>>, pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
} }
pub(super) const REF_COUNT_UNUSED: u32 = u32::MAX; pub(super) const REF_COUNT_UNUSED: u32 = u32::MAX;
const REF_COUNT_MAX: u32 = i32::MAX as u32; const REF_COUNT_MAX: u32 = i32::MAX as u32;
type PageMetaVtablePtr = core::ptr::DynMetadata<dyn PageMeta>; type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn FrameMeta>;
const_assert_eq!(PAGE_SIZE % META_SLOT_SIZE, 0); const_assert_eq!(PAGE_SIZE % META_SLOT_SIZE, 0);
const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE); const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE);
@ -113,29 +113,30 @@ const_assert_eq!(size_of::<MetaSlot>(), META_SLOT_SIZE);
/// The implemented structure must have a size less than or equal to /// The implemented structure must have a size less than or equal to
/// [`PAGE_METADATA_MAX_SIZE`] and an alignment less than or equal to /// [`PAGE_METADATA_MAX_SIZE`] and an alignment less than or equal to
/// [`PAGE_METADATA_MAX_ALIGN`]. /// [`PAGE_METADATA_MAX_ALIGN`].
pub unsafe trait PageMeta: Any + Send + Sync + 'static { pub unsafe trait FrameMeta: Any + Send + Sync + 'static {
/// Called when the last handle to the page is dropped.
fn on_drop(&mut self, _paddr: Paddr) {} fn on_drop(&mut self, _paddr: Paddr) {}
} }
/// Makes a structure usable as a page metadata. /// Makes a structure usable as a page metadata.
/// ///
/// Directly implementing [`PageMeta`] is not safe since the size and alignment /// Directly implementing [`FrameMeta`] is not safe since the size and alignment
/// must be checked. This macro provides a safe way to implement the trait with /// must be checked. This macro provides a safe way to implement the trait with
/// compile-time checks. /// compile-time checks.
#[macro_export] #[macro_export]
macro_rules! impl_page_meta { macro_rules! impl_frame_meta_for {
($($t:ty),*) => { ($($t:ty),*) => {
$( $(
use static_assertions::const_assert; use static_assertions::const_assert;
const_assert!(size_of::<$t>() <= $crate::mm::page::meta::PAGE_METADATA_MAX_SIZE); const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_SIZE);
const_assert!(align_of::<$t>() <= $crate::mm::page::meta::PAGE_METADATA_MAX_ALIGN); const_assert!(align_of::<$t>() <= $crate::mm::frame::meta::PAGE_METADATA_MAX_ALIGN);
// SAFETY: The size and alignment of the structure are checked. // SAFETY: The size and alignment of the structure are checked.
unsafe impl $crate::mm::page::meta::PageMeta for $t {} unsafe impl $crate::mm::frame::meta::FrameMeta for $t {}
)* )*
}; };
} }
pub use impl_page_meta; pub use impl_frame_meta_for;
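// An illustrative sketch (not part of this commit): defining a new page
// metadata type with the renamed macro. `ExampleMeta` is hypothetical; real
// implementors in this commit include `MetaPageMeta`, `KernelMeta`, and
// `UntypedMeta`.
#[derive(Debug, Default)]
struct ExampleMeta {
    // Any fields are fine as long as the size and alignment stay within
    // `PAGE_METADATA_MAX_SIZE` and `PAGE_METADATA_MAX_ALIGN`.
    tag: u32,
}
impl_frame_meta_for!(ExampleMeta);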
impl MetaSlot { impl MetaSlot {
/// Increases the page reference count by one. /// Increases the page reference count by one.
@ -178,7 +179,7 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
// SAFETY: The page metadata is initialized and valid. // SAFETY: The page metadata is initialized and valid.
let vtable_ptr = unsafe { vtable_ptr.assume_init_read() }; let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };
let meta_ptr: *mut dyn PageMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr); let meta_ptr: *mut dyn FrameMeta = core::ptr::from_raw_parts_mut(ptr, vtable_ptr);
// SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
// `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
@ -209,12 +210,12 @@ pub(super) unsafe fn drop_last_in_place(ptr: *mut MetaSlot) {
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct MetaPageMeta {} pub struct MetaPageMeta {}
impl_page_meta!(MetaPageMeta); impl_frame_meta_for!(MetaPageMeta);
/// Initializes the metadata of all physical pages. /// Initializes the metadata of all physical pages.
/// ///
/// The function returns a list of `Page`s containing the metadata. /// The function returns a list of `Frame`s containing the metadata.
pub(crate) fn init() -> ContPages<MetaPageMeta> { pub(crate) fn init() -> Segment<MetaPageMeta> {
let max_paddr = { let max_paddr = {
let regions = crate::boot::memory_regions(); let regions = crate::boot::memory_regions();
regions.iter().map(|r| r.base() + r.len()).max().unwrap() regions.iter().map(|r| r.base() + r.len()).max().unwrap()
@ -249,7 +250,7 @@ pub(crate) fn init() -> ContPages<MetaPageMeta> {
.unwrap(); .unwrap();
// Now the metadata pages are mapped, we can initialize the metadata. // Now the metadata pages are mapped, we can initialize the metadata.
ContPages::from_unused(meta_pages..meta_pages + num_meta_pages * PAGE_SIZE, |_| { Segment::from_unused(meta_pages..meta_pages + num_meta_pages * PAGE_SIZE, |_| {
MetaPageMeta {} MetaPageMeta {}
}) })
} }


@ -1,91 +1,177 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
//! Untyped physical memory management. //! Physical memory page management.
//! //!
//! A frame is a special page that is _untyped_ memory. //! A page is an aligned, contiguous range of bytes in physical memory. The sizes
//! It is used to store data irrelevant to the integrity of the kernel. //! of base pages and huge pages are architecture-dependent. A page can be mapped
//! All pages mapped to the virtual address space of the users are backed by //! to a virtual address using the page table.
//! frames. Frames, with all the properties of pages, can additionally be safely //!
//! read and written by the kernel or the user. //! Pages can be accessed through page handles, namely, [`Frame`]. A page handle
//! is a reference-counted handle to a page. When all handles to a page are dropped,
//! the page is released and can be reused.
//!
//! Pages can have dedicated metadata, which is implemented in the [`meta`] module.
//! The reference count and usage of a page are stored in the metadata as well, leaving
//! the handle only a pointer to the metadata.
pub mod options; pub mod allocator;
pub mod meta;
mod segment; mod segment;
pub mod untyped;
use core::mem::ManuallyDrop; use core::{
any::Any,
marker::PhantomData,
mem::ManuallyDrop,
sync::atomic::{AtomicUsize, Ordering},
};
use meta::{
mapping, FrameMeta, MetaSlot, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED,
};
pub use segment::Segment; pub use segment::Segment;
use untyped::UntypedMeta;
use super::{ use super::{PagingLevel, UntypedFrame, PAGE_SIZE};
page::{ use crate::mm::{Paddr, PagingConsts, Vaddr};
meta::{impl_page_meta, MetaSlot},
DynPage, Page,
},
Infallible,
};
use crate::{
mm::{
io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
paddr_to_vaddr, HasPaddr, Paddr, PAGE_SIZE,
},
Error, Result,
};
/// A handle to a physical memory page of untyped memory. static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
///
/// An instance of `Frame` is a handle to a page frame (a physical memory /// A page with a statically-known usage, whose metadata is represented by `M`.
/// page). A cloned `Frame` refers to the same page frame as the original. #[derive(Debug)]
/// As the original and cloned instances point to the same physical address, pub struct Frame<M: FrameMeta> {
/// they are treated as equal to each other. Behind the scene, a reference pub(super) ptr: *const MetaSlot,
/// counter is maintained for each page frame so that when all instances of pub(super) _marker: PhantomData<M>,
/// `Frame` that refer to the same page frame are dropped, the page frame
/// will be globally freed.
#[derive(Debug, Clone)]
pub struct Frame {
page: Page<FrameMeta>,
} }
impl Frame { unsafe impl<M: FrameMeta> Send for Frame<M> {}
/// Returns the physical address of the page frame.
pub fn start_paddr(&self) -> Paddr { unsafe impl<M: FrameMeta> Sync for Frame<M> {}
self.page.paddr()
impl<M: FrameMeta> Frame<M> {
/// Get a `Frame` handle with a specific usage from a raw, unused page.
///
/// The caller should provide the initial metadata of the page.
///
/// # Panics
///
/// The function panics if:
/// - the physical address is out of bound or not aligned;
/// - the page is already in use.
pub fn from_unused(paddr: Paddr, metadata: M) -> Self {
assert!(paddr % PAGE_SIZE == 0);
assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
// Checking unsafe preconditions of the `FrameMeta` trait.
debug_assert!(size_of::<M>() <= PAGE_METADATA_MAX_SIZE);
debug_assert!(align_of::<M>() <= PAGE_METADATA_MAX_ALIGN);
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
// SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
// immutable reference to it is always safe.
let slot = unsafe { &*ptr };
// `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata
// initialization won't be reordered before this memory compare-and-exchange.
slot.ref_count
.compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed)
.expect("Frame already in use when trying to get a new handle");
// SAFETY: We have exclusive access to the page metadata.
let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
vtable_ptr.write(core::ptr::metadata(&metadata as &dyn FrameMeta));
// SAFETY:
// 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the
// metadata storage.
// 2. The size and the alignment of the metadata storage is large enough to hold `M`
// (guaranteed by the safety requirement of the `FrameMeta` trait).
// 3. We have exclusive access to the metadata storage (guaranteed by the reference count).
unsafe { ptr.cast::<M>().cast_mut().write(metadata) };
// Assuming no one can create a `Frame` instance directly from the page address, `Relaxed`
// is fine here. Otherwise, we should use `Release` to ensure that the metadata
// initialization won't be reordered after this memory store.
slot.ref_count.store(1, Ordering::Relaxed);
Self {
ptr,
_marker: PhantomData,
}
} }
/// Returns the end physical address of the page frame. /// Forget the handle to the page.
pub fn end_paddr(&self) -> Paddr { ///
self.start_paddr() + PAGE_SIZE /// This will result in the page being leaked without calling the custom dropper.
///
/// A physical address to the page is returned in case the page needs to be
/// restored using [`Frame::from_raw`] later. This is useful when some architectural
/// data structures need to hold the page handle such as the page table.
#[allow(unused)]
pub(in crate::mm) fn into_raw(self) -> Paddr {
let paddr = self.paddr();
core::mem::forget(self);
paddr
} }
/// Returns the size of the frame /// Restore a forgotten `Frame` from a physical address.
///
/// # Safety
///
/// The caller should only restore a `Frame` that was previously forgotten using
/// [`Frame::into_raw`].
///
/// And the restoring operation should only be done once for a forgotten
/// `Frame`. Otherwise double-free will happen.
///
/// Also, the caller ensures that the usage of the page is correct. There's
/// no checking of the usage in this function.
pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
Self {
ptr,
_marker: PhantomData,
}
}
/// Get the physical address.
pub fn paddr(&self) -> Paddr {
mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
}
/// Get the paging level of this page.
///
/// This is the level of the page table entry that maps the frame,
/// which determines the size of the frame.
///
/// Currently, the level is always 1, which means the frame is a regular
/// page frame.
pub const fn level(&self) -> PagingLevel {
1
}
/// Size of this page in bytes.
pub const fn size(&self) -> usize { pub const fn size(&self) -> usize {
self.page.size() PAGE_SIZE
} }
/// Returns a raw pointer to the starting virtual address of the frame. /// Get the metadata of this page.
pub fn as_ptr(&self) -> *const u8 { pub fn meta(&self) -> &M {
paddr_to_vaddr(self.start_paddr()) as *const u8 // SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
// borrowed as `M` because the type is correct, it lives under the given lifetime, and no
// one will mutably borrow the page metadata after initialization.
unsafe { &*self.ptr.cast() }
} }
/// Returns a mutable raw pointer to the starting virtual address of the frame. /// Get the reference count of the page.
pub fn as_mut_ptr(&self) -> *mut u8 {
paddr_to_vaddr(self.start_paddr()) as *mut u8
}
/// Copies the content of `src` to the frame.
pub fn copy_from(&self, src: &Frame) {
if self.paddr() == src.paddr() {
return;
}
// SAFETY: the source and the destination do not overlap.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size());
}
}
/// Get the reference count of the frame.
/// ///
/// It returns the number of all references to the page, including all the /// It returns the number of all references to the page, including all the
/// existing page handles ([`Frame`]) and all the mappings in the page /// existing page handles ([`Frame`], [`AnyFrame`]), and all the mappings in the
/// table that points to the page. /// page table that points to the page.
/// ///
/// # Safety /// # Safety
/// ///
@ -93,147 +179,203 @@ impl Frame {
/// reference count can be changed by other threads at any time including /// reference count can be changed by other threads at any time including
/// potentially between calling this method and acting on the result. /// potentially between calling this method and acting on the result.
pub fn reference_count(&self) -> u32 { pub fn reference_count(&self) -> u32 {
self.page.reference_count() self.slot().ref_count.load(Ordering::Relaxed)
}
fn slot(&self) -> &MetaSlot {
// SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
// immutable reference to it is always safe.
unsafe { &*self.ptr }
} }
} }
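// An illustrative sketch (not part of this commit): the leak/restore round
// trip provided by the `into_raw` and `from_raw` methods above, using
// `UntypedMeta` as one possible metadata type.
fn stash_and_restore(frame: Frame<UntypedMeta>) -> Frame<UntypedMeta> {
    // Leaks the Rust handle; the reference count in the meta slot is kept.
    let paddr = frame.into_raw();
    // SAFETY: `paddr` was returned by `into_raw` above and is restored
    // exactly once.
    unsafe { Frame::<UntypedMeta>::from_raw(paddr) }
}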
impl From<Page<FrameMeta>> for Frame { impl<M: FrameMeta> Clone for Frame<M> {
fn from(page: Page<FrameMeta>) -> Self { fn clone(&self) -> Self {
Self { page } // SAFETY: We have already held a reference to the page.
} unsafe { self.slot().inc_ref_count() };
}
impl TryFrom<DynPage> for Frame {
type Error = DynPage;
/// Try converting a [`DynPage`] into the statically-typed [`Frame`].
///
/// If the dynamic page is not used as an untyped page frame, it will
/// return the dynamic page itself as is.
fn try_from(page: DynPage) -> core::result::Result<Self, Self::Error> {
page.try_into().map(|p: Page<FrameMeta>| p.into())
}
}
impl From<Frame> for Page<FrameMeta> {
fn from(frame: Frame) -> Self {
frame.page
}
}
impl HasPaddr for Frame {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl<'a> Frame {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a, Infallible> {
// SAFETY:
// - The memory range points to untyped memory.
// - The frame is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the frame.
unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
// SAFETY:
// - The memory range points to untyped memory.
// - The frame is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the frame.
unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) }
}
}
impl VmIo for Frame {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
let read_len = writer.avail().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
/// Metadata for a frame.
#[derive(Debug, Default)]
pub struct FrameMeta {}
impl_page_meta!(FrameMeta);
// Here are implementations for `xarray`.
use core::{marker::PhantomData, ops::Deref};
/// `FrameRef` is a struct that can work as `&'a Frame`.
///
/// This is solely useful for [`crate::collections::xarray`].
pub struct FrameRef<'a> {
inner: ManuallyDrop<Frame>,
_marker: PhantomData<&'a Frame>,
}
impl Deref for FrameRef<'_> {
type Target = Frame;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
// SAFETY: `Frame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
// The pointer is also aligned to 4.
unsafe impl xarray::ItemEntry for Frame {
type Ref<'a>
= FrameRef<'a>
where
Self: 'a;
fn into_raw(self) -> *const () {
let ptr = self.page.ptr;
core::mem::forget(self);
ptr as *const ()
}
unsafe fn from_raw(raw: *const ()) -> Self {
Self { Self {
page: Page::<FrameMeta> { ptr: self.ptr,
ptr: raw as *mut MetaSlot,
_marker: PhantomData,
},
}
}
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
Self::Ref {
inner: ManuallyDrop::new(Frame::from_raw(raw)),
_marker: PhantomData, _marker: PhantomData,
} }
} }
} }
impl<M: FrameMeta> Drop for Frame<M> {
fn drop(&mut self) {
let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
if last_ref_cnt == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
core::sync::atomic::fence(Ordering::Acquire);
// SAFETY: this is the last reference and is about to be dropped.
unsafe {
meta::drop_last_in_place(self.ptr as *mut MetaSlot);
}
}
}
}
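// An illustrative sketch (not part of this commit): cloning bumps the
// per-page reference count, and dropping the last handle runs the metadata's
// `on_drop` before the page is released and can be reused.
fn clone_then_drop(frame: Frame<UntypedMeta>) {
    let extra = frame.clone();
    debug_assert!(extra.reference_count() >= 2);
    drop(frame); // one handle left
    drop(extra); // last handle: the page is released and can be reused
}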
/// A page with a dynamically-known usage.
///
/// It can also be used when the user doesn't care about the usage of the page.
#[derive(Debug)]
pub struct AnyFrame {
ptr: *const MetaSlot,
}
unsafe impl Send for AnyFrame {}
unsafe impl Sync for AnyFrame {}
impl AnyFrame {
/// Forget the handle to the page.
///
/// This is the same as [`Frame::into_raw`].
///
/// This will result in the page being leaked without calling the custom dropper.
///
/// A physical address to the page is returned in case the page needs to be
/// restored using [`Self::from_raw`] later.
pub(in crate::mm) fn into_raw(self) -> Paddr {
let paddr = self.paddr();
core::mem::forget(self);
paddr
}
/// Restore a forgotten page from a physical address.
///
/// # Safety
///
/// The safety concerns are the same as [`Frame::from_raw`].
pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
Self { ptr }
}
/// Get the metadata of this page.
pub fn meta(&self) -> &dyn Any {
let slot = self.slot();
// SAFETY: The page metadata is valid to be borrowed immutably, since it will never be
// borrowed mutably after initialization.
let vtable_ptr = unsafe { &*slot.vtable_ptr.get() };
// SAFETY: The page metadata is initialized and valid.
let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };
let meta_ptr: *const dyn FrameMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);
// SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
// borrowed under `vtable_ptr` because the vtable is correct, it lives under the given
// lifetime, and no one will mutably borrow the page metadata after initialization.
(unsafe { &*meta_ptr }) as &dyn Any
}
/// Get the physical address of the start of the page
pub fn paddr(&self) -> Paddr {
mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
}
/// Get the paging level of this page.
pub fn level(&self) -> PagingLevel {
1
}
/// Size of this page in bytes.
pub fn size(&self) -> usize {
PAGE_SIZE
}
fn slot(&self) -> &MetaSlot {
// SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
// immutable reference to it is always safe.
unsafe { &*self.ptr }
}
}
impl<M: FrameMeta> TryFrom<AnyFrame> for Frame<M> {
type Error = AnyFrame;
/// Try converting an [`AnyFrame`] into the statically-typed [`Frame`].
///
/// If the usage of the page is not the same as the expected usage, it will
/// return the dynamic page itself as is.
fn try_from(dyn_page: AnyFrame) -> Result<Self, Self::Error> {
if dyn_page.meta().is::<M>() {
let result = Frame {
ptr: dyn_page.ptr,
_marker: PhantomData,
};
let _ = ManuallyDrop::new(dyn_page);
Ok(result)
} else {
Err(dyn_page)
}
}
}
impl<M: FrameMeta> From<Frame<M>> for AnyFrame {
fn from(page: Frame<M>) -> Self {
let result = Self { ptr: page.ptr };
let _ = ManuallyDrop::new(page);
result
}
}
impl From<UntypedFrame> for AnyFrame {
fn from(frame: UntypedFrame) -> Self {
Frame::<UntypedMeta>::from(frame).into()
}
}
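// An illustrative sketch (not part of this commit): erasing a
// statically-typed frame into an `AnyFrame` and recovering it through the
// `TryFrom` impl above.
fn erase_and_recover(frame: Frame<UntypedMeta>) -> Option<Frame<UntypedMeta>> {
    let any: AnyFrame = frame.into();
    // Succeeds only if the metadata stored in the slot really is `UntypedMeta`.
    Frame::<UntypedMeta>::try_from(any).ok()
}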
impl Clone for AnyFrame {
fn clone(&self) -> Self {
// SAFETY: We have already held a reference to the page.
unsafe { self.slot().inc_ref_count() };
Self { ptr: self.ptr }
}
}
impl Drop for AnyFrame {
fn drop(&mut self) {
let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
if last_ref_cnt == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
core::sync::atomic::fence(Ordering::Acquire);
// SAFETY: this is the last reference and is about to be dropped.
unsafe {
meta::drop_last_in_place(self.ptr as *mut MetaSlot);
}
}
}
}
/// Increases the reference count of the page by one.
///
/// # Safety
///
/// The caller should ensure the following conditions:
/// 1. The physical address must represent a valid page;
/// 2. The caller must have already held a reference to the page.
pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) {
debug_assert!(paddr % PAGE_SIZE == 0);
debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
let vaddr: Vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
// SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking
// an immutable reference to it is always safe.
let slot = unsafe { &*(vaddr as *const MetaSlot) };
// SAFETY: We have already held a reference to the page.
unsafe { slot.inc_ref_count() };
}


@ -1,178 +1,191 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
//! A contiguous segment of untyped memory pages. //! A contiguous range of pages.
use core::ops::Range; use alloc::vec::Vec;
use core::{mem::ManuallyDrop, ops::Range};
use crate::{ use super::{inc_page_ref_count, meta::FrameMeta, Frame};
mm::{ use crate::mm::{Paddr, PAGE_SIZE};
frame::FrameMeta,
io::{FallibleVmRead, FallibleVmWrite},
page::ContPages,
Frame, HasPaddr, Infallible, Paddr, VmIo, VmReader, VmWriter,
},
Error, Result,
};
/// A contiguous segment of untyped memory pages. /// A contiguous range of physical memory pages.
/// ///
/// A [`Segment`] object is a handle to a contiguous range of untyped memory /// This is a handle to many contiguous pages. It will be more lightweight
/// pages, and the underlying pages can be shared among multiple threads. /// than owning an array of page handles.
/// [`Segment::slice`] can be used to clone a slice of the segment (also can be
/// used to clone the entire range). Reference counts are maintained for each
/// page in the segment. So cloning the handle may not be cheap as it
/// increments the reference count of all the cloned pages.
/// ///
/// Other [`Frame`] handles can also refer to the pages in the segment. And /// The ownership is achieved by the reference counting mechanism of pages.
/// the segment can be iterated over to get all the frames in it. /// When constructing a `Segment`, the page handles are created then
/// /// forgotten, leaving the reference count. When dropping it, the page
/// To allocate a segment, use [`crate::mm::FrameAllocator`]. /// handles are restored and dropped, decrementing the reference count.
///
/// # Example
///
/// ```rust
/// let vm_segment = FrameAllocOptions::new(2)
/// .is_contiguous(true)
/// .alloc_contiguous()?;
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug)] #[derive(Debug)]
pub struct Segment { pub struct Segment<M: FrameMeta> {
pages: ContPages<FrameMeta>, range: Range<Paddr>,
_marker: core::marker::PhantomData<M>,
} }
impl HasPaddr for Segment { impl<M: FrameMeta> Drop for Segment<M> {
fn paddr(&self) -> Paddr { fn drop(&mut self) {
self.pages.start_paddr() for paddr in self.range.clone().step_by(PAGE_SIZE) {
} // SAFETY: for each page there would be a forgotten handle
} // when creating the `Segment` object.
drop(unsafe { Frame::<M>::from_raw(paddr) });
impl Clone for Segment {
fn clone(&self) -> Self {
Self {
pages: self.pages.clone(),
} }
} }
} }
impl Segment { impl<M: FrameMeta> Clone for Segment<M> {
/// Returns the start physical address. fn clone(&self) -> Self {
pub fn start_paddr(&self) -> Paddr { for paddr in self.range.clone().step_by(PAGE_SIZE) {
self.pages.start_paddr() // SAFETY: for each page there would be a forgotten handle
// when creating the `Segment` object, so we already have
// reference counts for the pages.
unsafe { inc_page_ref_count(paddr) };
}
Self {
range: self.range.clone(),
_marker: core::marker::PhantomData,
}
} }
}
/// Returns the end physical address. impl<M: FrameMeta> Segment<M> {
pub fn end_paddr(&self) -> Paddr { /// Creates a new `Segment` from unused pages.
self.pages.end_paddr()
}
/// Returns the number of bytes in it.
pub fn nbytes(&self) -> usize {
self.pages.nbytes()
}
/// Split the segment into two at the given byte offset from the start.
/// ///
/// The resulting segments cannot be empty. So the byte offset can be /// The closure receives the physical address of the page and returns the
/// neither zero nor the length of the segment. /// The closure receives the physical address of the page and returns the
/// metadata, which is similar to [`core::array::from_fn`].
/// ///
/// # Panics /// # Panics
/// ///
/// The function panics if the byte offset is out of bounds, at either ends, or /// The function panics if:
/// - the physical address is invalid or not aligned;
/// - any of the pages are already in use.
pub fn from_unused<F>(range: Range<Paddr>, mut metadata_fn: F) -> Self
where
F: FnMut(Paddr) -> M,
{
for paddr in range.clone().step_by(PAGE_SIZE) {
let _ = ManuallyDrop::new(Frame::<M>::from_unused(paddr, metadata_fn(paddr)));
}
Self {
range,
_marker: core::marker::PhantomData,
}
}
/// Gets the start physical address of the contiguous pages.
pub fn start_paddr(&self) -> Paddr {
self.range.start
}
/// Gets the end physical address of the contiguous pages.
pub fn end_paddr(&self) -> Paddr {
self.range.end
}
/// Gets the length in bytes of the contiguous pages.
pub fn nbytes(&self) -> usize {
self.range.end - self.range.start
}
/// Splits the pages into two at the given byte offset from the start.
///
/// The resulting pages cannot be empty. So the offset can be neither
/// zero nor the length of the pages.
///
/// # Panics
///
/// The function panics if the offset is out of bounds, at either ends, or
/// not base-page-aligned. /// not base-page-aligned.
pub fn split(self, offset: usize) -> (Self, Self) { pub fn split(self, offset: usize) -> (Self, Self) {
let (left, right) = self.pages.split(offset); assert!(offset % PAGE_SIZE == 0);
(Self { pages: left }, Self { pages: right }) assert!(0 < offset && offset < self.nbytes());
let old = ManuallyDrop::new(self);
let at = old.range.start + offset;
(
Self {
range: old.range.start..at,
_marker: core::marker::PhantomData,
},
Self {
range: at..old.range.end,
_marker: core::marker::PhantomData,
},
)
} }
/// Get an extra handle to the segment in the byte range. /// Gets an extra handle to the pages in the byte offset range.
/// ///
/// The sliced byte range is indexed by the offset from the start of the /// The sliced byte offset range is indexed by the offset from the start of
/// segment. The resulting segment holds extra reference counts. /// the contiguous pages. The resulting pages hold extra reference counts.
/// ///
/// # Panics /// # Panics
/// ///
/// The function panics if the byte range is out of bounds, or if any of /// The function panics if the byte offset range is out of bounds, or if
/// the ends of the byte range is not base-page aligned. /// any of the ends of the byte offset range is not base-page aligned.
pub fn slice(&self, range: &Range<usize>) -> Self { pub fn slice(&self, range: &Range<usize>) -> Self {
assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0);
let start = self.range.start + range.start;
let end = self.range.start + range.end;
assert!(start <= end && end <= self.range.end);
for paddr in (start..end).step_by(PAGE_SIZE) {
// SAFETY: We already have reference counts for the pages since
// for each page there would be a forgotten handle when creating
// the `Segment` object.
unsafe { inc_page_ref_count(paddr) };
}
Self { Self {
pages: self.pages.slice(range), range: start..end,
_marker: core::marker::PhantomData,
} }
} }
/// Gets a [`VmReader`] to read from the segment from the beginning to the end.
pub fn reader(&self) -> VmReader<'_, Infallible> {
let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8;
// SAFETY:
// - The memory range points to untyped memory.
// - The segment is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the segment.
unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) }
}
/// Gets a [`VmWriter`] to write to the segment from the beginning to the end.
pub fn writer(&self) -> VmWriter<'_, Infallible> {
let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8;
// SAFETY:
// - The memory range points to untyped memory.
// - The segment is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the segment.
unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) }
}
} }
impl From<Frame> for Segment { impl<M: FrameMeta> From<Frame<M>> for Segment<M> {
fn from(frame: Frame) -> Self { fn from(page: Frame<M>) -> Self {
let pa = page.paddr();
let _ = ManuallyDrop::new(page);
Self { Self {
pages: ContPages::from(frame.page), range: pa..pa + PAGE_SIZE,
_marker: core::marker::PhantomData,
} }
} }
} }
impl From<ContPages<FrameMeta>> for Segment { impl<M: FrameMeta> From<Segment<M>> for Vec<Frame<M>> {
fn from(pages: ContPages<FrameMeta>) -> Self { fn from(pages: Segment<M>) -> Self {
Self { pages } let vector = pages
.range
.clone()
.step_by(PAGE_SIZE)
.map(|i|
// SAFETY: for each page there would be a forgotten handle
// when creating the `Segment` object.
unsafe { Frame::<M>::from_raw(i) })
.collect();
let _ = ManuallyDrop::new(pages);
vector
} }
} }
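// An illustrative sketch (not part of this commit): building a `Segment` over
// two unused base pages, splitting it, and walking the tail. The starting
// physical address is hypothetical and must cover pages that are not yet in
// use; `UntypedMeta` (from `mm::frame::untyped`) stands in for any metadata type.
fn segment_example(start: Paddr) {
    let seg = Segment::<UntypedMeta>::from_unused(start..start + 2 * PAGE_SIZE, |_paddr| {
        UntypedMeta::default()
    });
    // Split after the first page; both halves keep their reference counts.
    let (head, tail) = seg.split(PAGE_SIZE);
    // Iterating consumes the handle, yielding one `Frame` per base page.
    for frame in tail {
        let _ = frame.paddr();
    }
    drop(head);
}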
impl VmIo for Segment { impl<M: FrameMeta> Iterator for Segment<M> {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> { type Item = Frame<M>;
let read_len = writer.avail();
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain();
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
impl Iterator for Segment {
type Item = Frame;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
self.pages.next().map(|page| Frame { page }) if self.range.start < self.range.end {
// SAFETY: each page in the range would be a handle forgotten
// when creating the `Segment` object.
let page = unsafe { Frame::<M>::from_raw(self.range.start) };
self.range.start += PAGE_SIZE;
// The end cannot be non-page-aligned.
debug_assert!(self.range.start <= self.range.end);
Some(page)
} else {
None
}
} }
} }


@ -0,0 +1,236 @@
// SPDX-License-Identifier: MPL-2.0
//! Untyped physical memory management.
//!
//! A frame is a special page that is _untyped_ memory.
//! It is used to store data irrelevant to the integrity of the kernel.
//! All pages mapped to the virtual address space of the users are backed by
//! frames. Frames, with all the properties of pages, can additionally be safely
//! read and written by the kernel or the user.
pub mod options;
mod segment;
use core::mem::ManuallyDrop;
pub use segment::UntypedSegment;
use super::{
meta::{impl_frame_meta_for, MetaSlot},
AnyFrame, Frame,
};
use crate::{
mm::{
io::{FallibleVmRead, FallibleVmWrite, VmIo, VmReader, VmWriter},
paddr_to_vaddr, HasPaddr, Infallible, Paddr, PAGE_SIZE,
},
Error, Result,
};
/// A handle to a physical memory page of untyped memory.
///
/// An instance of `UntypedFrame` is a handle to a page frame (a physical memory
/// page). A cloned `UntypedFrame` refers to the same page frame as the original.
/// As the original and cloned instances point to the same physical address,
/// they are treated as equal to each other. Behind the scene, a reference
/// counter is maintained for each page frame so that when all instances of
/// `UntypedFrame` that refer to the same page frame are dropped, the page frame
/// will be globally freed.
#[derive(Debug, Clone)]
pub struct UntypedFrame {
page: Frame<UntypedMeta>,
}
impl UntypedFrame {
/// Returns the physical address of the page frame.
pub fn start_paddr(&self) -> Paddr {
self.page.paddr()
}
/// Returns the end physical address of the page frame.
pub fn end_paddr(&self) -> Paddr {
self.start_paddr() + PAGE_SIZE
}
/// Returns the size of the frame
pub const fn size(&self) -> usize {
self.page.size()
}
/// Returns a raw pointer to the starting virtual address of the frame.
pub fn as_ptr(&self) -> *const u8 {
paddr_to_vaddr(self.start_paddr()) as *const u8
}
/// Returns a mutable raw pointer to the starting virtual address of the frame.
pub fn as_mut_ptr(&self) -> *mut u8 {
paddr_to_vaddr(self.start_paddr()) as *mut u8
}
/// Copies the content of `src` to the frame.
pub fn copy_from(&self, src: &UntypedFrame) {
if self.paddr() == src.paddr() {
return;
}
// SAFETY: the source and the destination do not overlap.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.size());
}
}
/// Get the reference count of the frame.
///
/// It returns the number of all references to the page, including all the
/// existing page handles ([`UntypedFrame`]) and all the mappings in the page
/// table that points to the page.
///
/// # Safety
///
/// The function is safe to call, but using it requires extra care. The
/// reference count can be changed by other threads at any time including
/// potentially between calling this method and acting on the result.
pub fn reference_count(&self) -> u32 {
self.page.reference_count()
}
}
impl From<Frame<UntypedMeta>> for UntypedFrame {
fn from(page: Frame<UntypedMeta>) -> Self {
Self { page }
}
}
impl TryFrom<AnyFrame> for UntypedFrame {
type Error = AnyFrame;
/// Try converting an [`AnyFrame`] into the statically-typed [`UntypedFrame`].
///
/// If the dynamic page is not used as an untyped page frame, it will
/// return the dynamic page itself as is.
fn try_from(page: AnyFrame) -> core::result::Result<Self, Self::Error> {
page.try_into().map(|p: Frame<UntypedMeta>| p.into())
}
}
impl From<UntypedFrame> for Frame<UntypedMeta> {
fn from(frame: UntypedFrame) -> Self {
frame.page
}
}
impl HasPaddr for UntypedFrame {
fn paddr(&self) -> Paddr {
self.start_paddr()
}
}
impl<'a> UntypedFrame {
/// Returns a reader to read data from it.
pub fn reader(&'a self) -> VmReader<'a, Infallible> {
// SAFETY:
// - The memory range points to untyped memory.
// - The frame is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the frame.
unsafe { VmReader::from_kernel_space(self.as_ptr(), self.size()) }
}
/// Returns a writer to write data into it.
pub fn writer(&'a self) -> VmWriter<'a, Infallible> {
// SAFETY:
// - The memory range points to untyped memory.
// - The frame is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the frame.
unsafe { VmWriter::from_kernel_space(self.as_mut_ptr(), self.size()) }
}
}
impl VmIo for UntypedFrame {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
let read_len = writer.avail().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain().min(self.size().saturating_sub(offset));
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(write_len).ok_or(Error::Overflow)?;
if max_offset > self.size() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
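// An illustrative sketch (not part of this commit): typed I/O on an
// `UntypedFrame` through the `VmIo` impl above, assuming the
// `write_bytes`/`read_bytes` convenience methods of `VmIo`.
fn fill_and_check(frame: &UntypedFrame) -> Result<()> {
    let src = [0xabu8; 16];
    frame.write_bytes(0, &src)?;
    let mut dst = [0u8; 16];
    frame.read_bytes(0, &mut dst)?;
    debug_assert_eq!(src, dst);
    Ok(())
}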
/// Metadata for a frame.
#[derive(Debug, Default)]
pub struct UntypedMeta {}
impl_frame_meta_for!(UntypedMeta);
// Here are implementations for `xarray`.
use core::{marker::PhantomData, ops::Deref};
/// `FrameRef` is a struct that can work as `&'a UntypedFrame`.
///
/// This is solely useful for [`crate::collections::xarray`].
pub struct FrameRef<'a> {
inner: ManuallyDrop<UntypedFrame>,
_marker: PhantomData<&'a UntypedFrame>,
}
impl Deref for FrameRef<'_> {
type Target = UntypedFrame;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
// SAFETY: `UntypedFrame` is essentially an `*const MetaSlot` that could be used as a `*const` pointer.
// The pointer is also aligned to 4.
unsafe impl xarray::ItemEntry for UntypedFrame {
type Ref<'a>
= FrameRef<'a>
where
Self: 'a;
fn into_raw(self) -> *const () {
let ptr = self.page.ptr;
core::mem::forget(self);
ptr as *const ()
}
unsafe fn from_raw(raw: *const ()) -> Self {
Self {
page: Frame::<UntypedMeta> {
ptr: raw as *mut MetaSlot,
_marker: PhantomData,
},
}
}
unsafe fn raw_as_ref<'a>(raw: *const ()) -> Self::Ref<'a> {
Self::Ref {
inner: ManuallyDrop::new(UntypedFrame::from_raw(raw)),
_marker: PhantomData,
}
}
}


@ -2,9 +2,9 @@
//! Options for allocating frames //! Options for allocating frames
use super::{Frame, Segment}; use super::{UntypedFrame, UntypedSegment};
use crate::{ use crate::{
mm::{frame::FrameMeta, page, PAGE_SIZE}, mm::{frame, frame::untyped::UntypedMeta, PAGE_SIZE},
prelude::*, prelude::*,
Error, Error,
}; };
@ -52,13 +52,13 @@ impl FrameAllocOptions {
} }
/// Allocates a single page frame according to the given options. /// Allocates a single page frame according to the given options.
pub fn alloc_single(&self) -> Result<Frame> { pub fn alloc_single(&self) -> Result<UntypedFrame> {
if self.nframes != 1 { if self.nframes != 1 {
return Err(Error::InvalidArgs); return Err(Error::InvalidArgs);
} }
let page = page::allocator::alloc_single(FrameMeta::default()).ok_or(Error::NoMemory)?; let page = frame::allocator::alloc_single(UntypedMeta::default()).ok_or(Error::NoMemory)?;
let frame = Frame { page }; let frame = UntypedFrame { page };
if !self.uninit { if !self.uninit {
frame.writer().fill(0); frame.writer().fill(0);
} }
@ -68,17 +68,19 @@ impl FrameAllocOptions {
/// Allocates a contiguous range of page frames according to the given options. /// Allocates a contiguous range of page frames according to the given options.
/// ///
/// The returned [`Segment`] contains at least one page frame. /// The returned [`UntypedSegment`] contains at least one page frame.
pub fn alloc_contiguous(&self) -> Result<Segment> { pub fn alloc_contiguous(&self) -> Result<UntypedSegment> {
// It's no use checking `self.is_contiguous` here. // It's no use checking `self.is_contiguous` here.
if self.nframes == 0 { if self.nframes == 0 {
return Err(Error::InvalidArgs); return Err(Error::InvalidArgs);
} }
let segment: Segment = let segment: UntypedSegment =
page::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| FrameMeta::default()) frame::allocator::alloc_contiguous(self.nframes * PAGE_SIZE, |_| {
.ok_or(Error::NoMemory)? UntypedMeta::default()
.into(); })
.ok_or(Error::NoMemory)?
.into();
if !self.uninit { if !self.uninit {
segment.writer().fill(0); segment.writer().fill(0);
} }


@ -0,0 +1,177 @@
// SPDX-License-Identifier: MPL-2.0
//! A contiguous segment of untyped memory pages.
use core::ops::Range;
use crate::{
mm::{
frame::{untyped::UntypedMeta, Segment},
io::{FallibleVmRead, FallibleVmWrite},
HasPaddr, Infallible, Paddr, UntypedFrame, VmIo, VmReader, VmWriter,
},
Error, Result,
};
/// A contiguous segment of untyped memory pages.
///
/// An [`UntypedSegment`] object is a handle to a contiguous range of untyped memory
/// pages, and the underlying pages can be shared among multiple threads.
/// [`UntypedSegment::slice`] can be used to clone a slice of the segment (also can be
/// used to clone the entire range). Reference counts are maintained for each
/// page in the segment. So cloning the handle may not be cheap as it
/// increments the reference count of all the cloned pages.
///
/// Other [`UntypedFrame`] handles can also refer to the pages in the segment. And
/// the segment can be iterated over to get all the frames in it.
///
/// To allocate a segment, use [`crate::mm::FrameAllocator`].
///
/// # Example
///
/// ```rust
/// let vm_segment = FrameAllocOptions::new(2)
/// .is_contiguous(true)
/// .alloc_contiguous()?;
/// vm_segment.write_bytes(0, buf)?;
/// ```
#[derive(Debug)]
pub struct UntypedSegment {
pages: Segment<UntypedMeta>,
}
impl HasPaddr for UntypedSegment {
fn paddr(&self) -> Paddr {
self.pages.start_paddr()
}
}
impl Clone for UntypedSegment {
fn clone(&self) -> Self {
Self {
pages: self.pages.clone(),
}
}
}
impl UntypedSegment {
/// Returns the start physical address.
pub fn start_paddr(&self) -> Paddr {
self.pages.start_paddr()
}
/// Returns the end physical address.
pub fn end_paddr(&self) -> Paddr {
self.pages.end_paddr()
}
/// Returns the number of bytes in it.
pub fn nbytes(&self) -> usize {
self.pages.nbytes()
}
/// Split the segment into two at the given byte offset from the start.
///
/// The resulting segments cannot be empty. So the byte offset can be
/// neither zero nor the length of the segment.
///
/// # Panics
///
/// The function panics if the byte offset is out of bounds, at either ends, or
/// not base-page-aligned.
pub fn split(self, offset: usize) -> (Self, Self) {
let (left, right) = self.pages.split(offset);
(Self { pages: left }, Self { pages: right })
}
/// Get an extra handle to the segment in the byte range.
///
/// The sliced byte range is indexed by the offset from the start of the
/// segment. The resulting segment holds extra reference counts.
///
/// # Panics
///
/// The function panics if the byte range is out of bounds, or if any of
/// the ends of the byte range is not base-page aligned.
pub fn slice(&self, range: &Range<usize>) -> Self {
Self {
pages: self.pages.slice(range),
}
}
/// Gets a [`VmReader`] to read from the segment from the beginning to the end.
pub fn reader(&self) -> VmReader<'_, Infallible> {
let ptr = super::paddr_to_vaddr(self.start_paddr()) as *const u8;
// SAFETY:
// - The memory range points to untyped memory.
// - The segment is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the segment.
unsafe { VmReader::from_kernel_space(ptr, self.nbytes()) }
}
/// Gets a [`VmWriter`] to write to the segment from the beginning to the end.
pub fn writer(&self) -> VmWriter<'_, Infallible> {
let ptr = super::paddr_to_vaddr(self.start_paddr()) as *mut u8;
// SAFETY:
// - The memory range points to untyped memory.
// - The segment is alive during the lifetime `'a`.
// - Using `VmReader` and `VmWriter` is the only way to access the segment.
unsafe { VmWriter::from_kernel_space(ptr, self.nbytes()) }
}
}
impl From<UntypedFrame> for UntypedSegment {
fn from(frame: UntypedFrame) -> Self {
Self {
pages: Segment::from(frame.page),
}
}
}
impl From<Segment<UntypedMeta>> for UntypedSegment {
fn from(pages: Segment<UntypedMeta>) -> Self {
Self { pages }
}
}
impl VmIo for UntypedSegment {
fn read(&self, offset: usize, writer: &mut VmWriter) -> Result<()> {
let read_len = writer.avail();
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(read_len).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self
.reader()
.skip(offset)
.read_fallible(writer)
.map_err(|(e, _)| e)?;
debug_assert!(len == read_len);
Ok(())
}
fn write(&self, offset: usize, reader: &mut VmReader) -> Result<()> {
let write_len = reader.remain();
// Do bound check with potential integer overflow in mind
let max_offset = offset.checked_add(reader.remain()).ok_or(Error::Overflow)?;
if max_offset > self.nbytes() {
return Err(Error::InvalidArgs);
}
let len = self
.writer()
.skip(offset)
.write_fallible(reader)
.map_err(|(e, _)| e)?;
debug_assert!(len == write_len);
Ok(())
}
}
impl Iterator for UntypedSegment {
type Item = UntypedFrame;
fn next(&mut self) -> Option<Self::Item> {
self.pages.next().map(|page| UntypedFrame { page })
}
}
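// An illustrative sketch (not part of this commit): allocating an
// `UntypedSegment` with the renamed allocator options and iterating it into
// `UntypedFrame`s (assuming `FrameAllocOptions` is in scope).
fn alloc_and_walk() {
    let segment: UntypedSegment = FrameAllocOptions::new(4)
        .alloc_contiguous()
        .expect("out of frames");
    for frame in segment {
        // Each yielded handle is a regular base-page `UntypedFrame`.
        let _ = frame.start_paddr();
    }
}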


@ -11,7 +11,7 @@ use spin::Once;
use super::paddr_to_vaddr; use super::paddr_to_vaddr;
use crate::{ use crate::{
mm::{page::allocator::PAGE_ALLOCATOR, PAGE_SIZE}, mm::{frame::allocator::PAGE_ALLOCATOR, PAGE_SIZE},
prelude::*, prelude::*,
sync::SpinLock, sync::SpinLock,
trap::disable_local, trap::disable_local,


@ -7,11 +7,11 @@
//! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and //! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
//! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_. //! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
//! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory //! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
//! (e.g., `&[u8]`) or untyped memory (e.g., [`Frame`]). Behind the scene, `VmReader` and `VmWriter` //! (e.g., `&[u8]`) or untyped memory (e.g., [`UntypedFrame`]). Behind the scene, `VmReader` and `VmWriter`
//! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose //! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
//! safety depends on whether the given memory regions are _valid_ or not. //! safety depends on whether the given memory regions are _valid_ or not.
//! //!
//! [`Frame`]: crate::mm::Frame //! [`UntypedFrame`]: crate::mm::UntypedFrame
//! [`from_user_space`]: `VmReader::from_user_space` //! [`from_user_space`]: `VmReader::from_user_space`
//! [`from_kernel_space`]: `VmReader::from_kernel_space` //! [`from_kernel_space`]: `VmReader::from_kernel_space`
//! //!
@ -58,7 +58,7 @@ use crate::{
}; };
/// A trait that enables reading/writing data from/to a VM object, /// A trait that enables reading/writing data from/to a VM object,
/// e.g., [`Segment`], [`Vec<Frame>`] and [`Frame`]. /// e.g., [`UntypedSegment`], [`Vec<UntypedFrame>`] and [`UntypedFrame`].
/// ///
/// # Concurrency /// # Concurrency
/// ///
@ -67,8 +67,8 @@ use crate::{
/// desire predictability or atomicity, the users should add extra mechanism /// desire predictability or atomicity, the users should add extra mechanism
/// for such properties. /// for such properties.
/// ///
/// [`Segment`]: crate::mm::Segment /// [`UntypedSegment`]: crate::mm::UntypedSegment
/// [`Frame`]: crate::mm::Frame /// [`UntypedFrame`]: crate::mm::UntypedFrame
pub trait VmIo: Send + Sync { pub trait VmIo: Send + Sync {
/// Reads requested data at a specified offset into a given `VmWriter`. /// Reads requested data at a specified offset into a given `VmWriter`.
/// ///


@ -11,7 +11,7 @@ use super::{KERNEL_PAGE_TABLE, TRACKED_MAPPED_PAGES_RANGE, VMALLOC_VADDR_RANGE};
use crate::{ use crate::{
cpu::CpuSet, cpu::CpuSet,
mm::{ mm::{
page::{meta::PageMeta, DynPage, Page}, frame::{meta::FrameMeta, AnyFrame, Frame},
page_prop::PageProperty, page_prop::PageProperty,
page_table::PageTableItem, page_table::PageTableItem,
tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD}, tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
@ -204,10 +204,10 @@ impl<M: AllocatorSelector + 'static> KVirtArea<M> {
impl KVirtArea<Tracked> { impl KVirtArea<Tracked> {
/// Maps pages into the kernel virtual area. /// Maps pages into the kernel virtual area.
pub fn map_pages<T: PageMeta>( pub fn map_pages<T: FrameMeta>(
&mut self, &mut self,
range: Range<Vaddr>, range: Range<Vaddr>,
pages: impl Iterator<Item = Page<T>>, pages: impl Iterator<Item = Frame<T>>,
prop: PageProperty, prop: PageProperty,
) { ) {
assert!(self.start() <= range.start && self.end() >= range.end); assert!(self.start() <= range.start && self.end() >= range.end);
@ -232,7 +232,7 @@ impl KVirtArea<Tracked> {
/// ///
/// This function returns None if the address is not mapped (`NotMapped`), /// This function returns None if the address is not mapped (`NotMapped`),
/// while panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page. /// while panics if the address is mapped to a `MappedUntracked` or `PageTableNode` page.
pub fn get_page(&self, addr: Vaddr) -> Option<DynPage> { pub fn get_page(&self, addr: Vaddr) -> Option<AnyFrame> {
let query_result = self.query_page(addr); let query_result = self.query_page(addr);
match query_result { match query_result {
PageTableItem::Mapped { PageTableItem::Mapped {


@ -47,11 +47,11 @@ use log::info;
use spin::Once; use spin::Once;
use super::{ use super::{
nr_subpage_per_huge, frame::{
page::{ meta::{impl_frame_meta_for, mapping, MetaPageMeta},
meta::{impl_page_meta, mapping, MetaPageMeta}, Frame, Segment,
ContPages, Page,
}, },
nr_subpage_per_huge,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags}, page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
page_table::{KernelMode, PageTable}, page_table::{KernelMode, PageTable},
Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE, Paddr, PagingConstsTrait, Vaddr, PAGE_SIZE,
@ -111,7 +111,7 @@ pub fn paddr_to_vaddr(pa: Paddr) -> usize {
/// Returns whether the given address should be mapped as tracked. /// Returns whether the given address should be mapped as tracked.
/// ///
/// For what tracked mapping means, see [`crate::mm::page::meta::MapTrackingStatus`]. /// For what tracked mapping means, see [`crate::mm::frame::meta::MapTrackingStatus`].
pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool { pub(crate) fn should_map_as_tracked(addr: Vaddr) -> bool {
!(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr)) !(LINEAR_MAPPING_VADDR_RANGE.contains(&addr) || VMALLOC_VADDR_RANGE.contains(&addr))
} }
@ -131,7 +131,7 @@ pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelMode, PageTableEntry, PagingC
/// ///
/// This function should be called before: /// This function should be called before:
/// - any initializer that modifies the kernel page table. /// - any initializer that modifies the kernel page table.
pub fn init_kernel_page_table(meta_pages: ContPages<MetaPageMeta>) { pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
info!("Initializing the kernel page table"); info!("Initializing the kernel page table");
let regions = crate::boot::memory_regions(); let regions = crate::boot::memory_regions();
@ -214,7 +214,7 @@ pub fn init_kernel_page_table(meta_pages: ContPages<MetaPageMeta>) {
}; };
let mut cursor = kpt.cursor_mut(&from).unwrap(); let mut cursor = kpt.cursor_mut(&from).unwrap();
for frame_paddr in to.step_by(PAGE_SIZE) { for frame_paddr in to.step_by(PAGE_SIZE) {
let page = Page::<KernelMeta>::from_unused(frame_paddr, KernelMeta::default()); let page = Frame::<KernelMeta>::from_unused(frame_paddr, KernelMeta::default());
// SAFETY: we are doing mappings for the kernel. // SAFETY: we are doing mappings for the kernel.
unsafe { unsafe {
let _old = cursor.map(page.into(), prop); let _old = cursor.map(page.into(), prop);
@ -251,4 +251,4 @@ pub unsafe fn activate_kernel_page_table() {
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct KernelMeta {} pub struct KernelMeta {}
impl_page_meta!(KernelMeta); impl_frame_meta_for!(KernelMeta);


@ -14,7 +14,6 @@ pub(crate) mod heap_allocator;
mod io; mod io;
pub(crate) mod kspace; pub(crate) mod kspace;
mod offset; mod offset;
pub(crate) mod page;
pub(crate) mod page_prop; pub(crate) mod page_prop;
pub(crate) mod page_table; pub(crate) mod page_table;
pub mod stat; pub mod stat;
@ -25,7 +24,7 @@ use core::{fmt::Debug, ops::Range};
pub use self::{ pub use self::{
dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr}, dma::{Daddr, DmaCoherent, DmaDirection, DmaStream, DmaStreamSlice, HasDaddr},
frame::{options::FrameAllocOptions, Frame, Segment}, frame::untyped::{options::FrameAllocOptions, UntypedFrame, UntypedSegment},
io::{ io::{
Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader, Fallible, FallibleVmRead, FallibleVmWrite, Infallible, PodOnce, VmIo, VmIoOnce, VmReader,
VmWriter, VmWriter,
@ -34,7 +33,7 @@ pub use self::{
vm_space::VmSpace, vm_space::VmSpace,
}; };
pub(crate) use self::{ pub(crate) use self::{
kspace::paddr_to_vaddr, page::meta::init as init_page_meta, page_prop::PrivilegedPageFlags, frame::meta::init as init_page_meta, kspace::paddr_to_vaddr, page_prop::PrivilegedPageFlags,
page_table::PageTable, page_table::PageTable,
}; };
use crate::arch::mm::PagingConsts; use crate::arch::mm::PagingConsts;


@ -1,191 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! A contiguous range of pages.
use alloc::vec::Vec;
use core::{mem::ManuallyDrop, ops::Range};
use super::{inc_page_ref_count, meta::PageMeta, Page};
use crate::mm::{Paddr, PAGE_SIZE};
/// A contiguous range of physical memory pages.
///
/// This is a handle to many contiguous pages. It is more lightweight
/// than owning an array of page handles.
///
/// The ownership is achieved by the reference counting mechanism of pages.
/// When constructing a `ContPages`, the page handles are created then
/// forgotten, leaving the reference count. When dropping it, the page
/// handles are restored and dropped, decrementing the reference count.
#[derive(Debug)]
pub struct ContPages<M: PageMeta> {
range: Range<Paddr>,
_marker: core::marker::PhantomData<M>,
}
impl<M: PageMeta> Drop for ContPages<M> {
fn drop(&mut self) {
for paddr in self.range.clone().step_by(PAGE_SIZE) {
// SAFETY: for each page there would be a forgotten handle
// when creating the `ContPages` object.
drop(unsafe { Page::<M>::from_raw(paddr) });
}
}
}
impl<M: PageMeta> Clone for ContPages<M> {
fn clone(&self) -> Self {
for paddr in self.range.clone().step_by(PAGE_SIZE) {
// SAFETY: for each page there would be a forgotten handle
// when creating the `ContPages` object, so we already have
// reference counts for the pages.
unsafe { inc_page_ref_count(paddr) };
}
Self {
range: self.range.clone(),
_marker: core::marker::PhantomData,
}
}
}
impl<M: PageMeta> ContPages<M> {
/// Creates a new `ContPages` from unused pages.
///
/// The caller must provide a closure to initialize metadata for all the pages.
/// The closure receives the physical address of the page and returns the
/// metadata, which is similar to [`core::array::from_fn`].
///
/// # Panics
///
/// The function panics if:
/// - the physical address is invalid or not aligned;
/// - any of the pages are already in use.
pub fn from_unused<F>(range: Range<Paddr>, mut metadata_fn: F) -> Self
where
F: FnMut(Paddr) -> M,
{
for paddr in range.clone().step_by(PAGE_SIZE) {
let _ = ManuallyDrop::new(Page::<M>::from_unused(paddr, metadata_fn(paddr)));
}
Self {
range,
_marker: core::marker::PhantomData,
}
}
/// Gets the start physical address of the contiguous pages.
pub fn start_paddr(&self) -> Paddr {
self.range.start
}
/// Gets the end physical address of the contiguous pages.
pub fn end_paddr(&self) -> Paddr {
self.range.end
}
/// Gets the length in bytes of the contiguous pages.
pub fn nbytes(&self) -> usize {
self.range.end - self.range.start
}
/// Splits the pages into two at the given byte offset from the start.
///
/// The resulting pages cannot be empty. So the offset can be neither
/// zero nor the length of the pages.
///
/// # Panics
///
/// The function panics if the offset is out of bounds, at either end, or
/// not base-page-aligned.
pub fn split(self, offset: usize) -> (Self, Self) {
assert!(offset % PAGE_SIZE == 0);
assert!(0 < offset && offset < self.nbytes());
let old = ManuallyDrop::new(self);
let at = old.range.start + offset;
(
Self {
range: old.range.start..at,
_marker: core::marker::PhantomData,
},
Self {
range: at..old.range.end,
_marker: core::marker::PhantomData,
},
)
}
/// Gets an extra handle to the pages in the byte offset range.
///
/// The sliced byte offset range is indexed by the offset from the start of
/// the contiguous pages. The resulting pages hold extra reference counts.
///
/// # Panics
///
/// The function panics if the byte offset range is out of bounds, or if
/// any of the ends of the byte offset range is not base-page aligned.
pub fn slice(&self, range: &Range<usize>) -> Self {
assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0);
let start = self.range.start + range.start;
let end = self.range.start + range.end;
assert!(start <= end && end <= self.range.end);
for paddr in (start..end).step_by(PAGE_SIZE) {
// SAFETY: We already have reference counts for the pages since
// for each page there would be a forgotten handle when creating
// the `ContPages` object.
unsafe { inc_page_ref_count(paddr) };
}
Self {
range: start..end,
_marker: core::marker::PhantomData,
}
}
}
impl<M: PageMeta> From<Page<M>> for ContPages<M> {
fn from(page: Page<M>) -> Self {
let pa = page.paddr();
let _ = ManuallyDrop::new(page);
Self {
range: pa..pa + PAGE_SIZE,
_marker: core::marker::PhantomData,
}
}
}
impl<M: PageMeta> From<ContPages<M>> for Vec<Page<M>> {
fn from(pages: ContPages<M>) -> Self {
let vector = pages
.range
.clone()
.step_by(PAGE_SIZE)
.map(|i|
// SAFETY: for each page there would be a forgotten handle
// when creating the `ContPages` object.
unsafe { Page::<M>::from_raw(i) })
.collect();
let _ = ManuallyDrop::new(pages);
vector
}
}
impl<M: PageMeta> Iterator for ContPages<M> {
type Item = Page<M>;
fn next(&mut self) -> Option<Self::Item> {
if self.range.start < self.range.end {
// SAFETY: each page in the range would be a handle forgotten
// when creating the `ContPages` object.
let page = unsafe { Page::<M>::from_raw(self.range.start) };
self.range.start += PAGE_SIZE;
// The end cannot be non-page-aligned.
debug_assert!(self.range.start <= self.range.end);
Some(page)
} else {
None
}
}
}
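For reference, a minimal sketch of how the `ContPages` API removed above fits together, written as crate-internal code (the `page` module is `pub(crate)`) and reusing the `alloc_contiguous` and `FrameMeta` helpers that appear in the page-table tests later in this diff. After this commit, the same code would spell `Segment<UntypedMeta>` and `Frame<UntypedMeta>` under the `frame` module.

use alloc::vec::Vec;

use crate::mm::{frame::FrameMeta, page::{allocator, ContPages, Page}, PAGE_SIZE};

fn cont_pages_sketch() {
    // Allocate four contiguous pages; the closure initializes each page's metadata.
    let pages: ContPages<FrameMeta> =
        allocator::alloc_contiguous(4 * PAGE_SIZE, |_| FrameMeta::default()).unwrap();
    assert_eq!(pages.nbytes(), 4 * PAGE_SIZE);

    // Split into a 1-page head and a 3-page tail. No reference counts change;
    // the forgotten per-page handles are simply divided between the halves.
    let (head, tail) = pages.split(PAGE_SIZE);

    // `slice` takes extra references, so `tail` remains usable afterwards.
    let first_of_tail = tail.slice(&(0..PAGE_SIZE));

    // Converting into individual handles restores the forgotten `Page`s.
    let singles: Vec<Page<FrameMeta>> = tail.into();

    // Dropping any of these decrements the per-page reference counts.
    drop((head, first_of_tail, singles));
}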

View File

@ -1,379 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Physical memory page management.
//!
//! A page is an aligned, contiguous range of bytes in physical memory. The sizes
//! of base pages and huge pages are architecture-dependent. A page can be mapped
//! to a virtual address using the page table.
//!
//! Pages can be accessed through page handles, namely, [`Page`]. A page handle
//! is a reference-counted handle to a page. When all handles to a page are dropped,
//! the page is released and can be reused.
//!
//! Pages can have dedicated metadata, which is implemented in the [`meta`] module.
//! The reference count and usage of a page are stored in the metadata as well, leaving
//! the handle only a pointer to the metadata.
pub mod allocator;
mod cont_pages;
pub mod meta;
use core::{
any::Any,
marker::PhantomData,
mem::ManuallyDrop,
sync::atomic::{AtomicUsize, Ordering},
};
pub use cont_pages::ContPages;
use meta::{
mapping, MetaSlot, PageMeta, PAGE_METADATA_MAX_ALIGN, PAGE_METADATA_MAX_SIZE, REF_COUNT_UNUSED,
};
use super::{frame::FrameMeta, Frame, PagingLevel, PAGE_SIZE};
use crate::mm::{Paddr, PagingConsts, Vaddr};
static MAX_PADDR: AtomicUsize = AtomicUsize::new(0);
/// A page with a statically-known usage, whose metadata is represented by `M`.
#[derive(Debug)]
pub struct Page<M: PageMeta> {
pub(super) ptr: *const MetaSlot,
pub(super) _marker: PhantomData<M>,
}
unsafe impl<M: PageMeta> Send for Page<M> {}
unsafe impl<M: PageMeta> Sync for Page<M> {}
impl<M: PageMeta> Page<M> {
/// Get a `Page` handle with a specific usage from a raw, unused page.
///
/// The caller should provide the initial metadata of the page.
///
/// # Panics
///
/// The function panics if:
/// - the physical address is out of bounds or not aligned;
/// - the page is already in use.
pub fn from_unused(paddr: Paddr, metadata: M) -> Self {
assert!(paddr % PAGE_SIZE == 0);
assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
// Checking unsafe preconditions of the `PageMeta` trait.
debug_assert!(size_of::<M>() <= PAGE_METADATA_MAX_SIZE);
debug_assert!(align_of::<M>() <= PAGE_METADATA_MAX_ALIGN);
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
// SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
// immutable reference to it is always safe.
let slot = unsafe { &*ptr };
// `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata
// initialization won't be reordered before this memory compare-and-exchange.
slot.ref_count
.compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed)
.expect("Page already in use when trying to get a new handle");
// SAFETY: We have exclusive access to the page metadata.
let vtable_ptr = unsafe { &mut *slot.vtable_ptr.get() };
vtable_ptr.write(core::ptr::metadata(&metadata as &dyn PageMeta));
// SAFETY:
// 1. `ptr` points to the first field of `MetaSlot` (guaranteed by `repr(C)`), which is the
// metadata storage.
// 2. The size and the alignment of the metadata storage is large enough to hold `M`
// (guaranteed by the safety requirement of the `PageMeta` trait).
// 3. We have exclusive access to the metadata storage (guaranteed by the reference count).
unsafe { ptr.cast::<M>().cast_mut().write(metadata) };
// Assuming no one can create a `Page` instance directly from the page address, `Relaxed`
// is fine here. Otherwise, we should use `Release` to ensure that the metadata
// initialization won't be reordered after this memory store.
slot.ref_count.store(1, Ordering::Relaxed);
Self {
ptr,
_marker: PhantomData,
}
}
/// Forget the handle to the page.
///
/// This will result in the page being leaked without calling the custom dropper.
///
/// A physical address to the page is returned in case the page needs to be
/// restored using [`Page::from_raw`] later. This is useful when some architectural
/// data structures need to hold the page handle such as the page table.
#[allow(unused)]
pub(in crate::mm) fn into_raw(self) -> Paddr {
let paddr = self.paddr();
core::mem::forget(self);
paddr
}
/// Restore a forgotten `Page` from a physical address.
///
/// # Safety
///
/// The caller should only restore a `Page` that was previously forgotten using
/// [`Page::into_raw`].
///
/// And the restoring operation should only be done once for a forgotten
/// `Page`. Otherwise double-free will happen.
///
/// Also, the caller ensures that the usage of the page is correct. There's
/// no checking of the usage in this function.
pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
Self {
ptr,
_marker: PhantomData,
}
}
/// Get the physical address.
pub fn paddr(&self) -> Paddr {
mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
}
/// Get the paging level of this page.
///
/// This is the level of the page table entry that maps the frame,
/// which determines the size of the frame.
///
/// Currently, the level is always 1, which means the frame is a regular
/// page frame.
pub const fn level(&self) -> PagingLevel {
1
}
/// Size of this page in bytes.
pub const fn size(&self) -> usize {
PAGE_SIZE
}
/// Get the metadata of this page.
pub fn meta(&self) -> &M {
// SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
// borrowed as `M` because the type is correct, it lives under the given lifetime, and no
// one will mutably borrow the page metadata after initialization.
unsafe { &*self.ptr.cast() }
}
/// Get the reference count of the page.
///
/// It returns the number of all references to the page, including all the
/// existing page handles ([`Page`], [`DynPage`]), and all the mappings in the
/// page table that point to the page.
///
/// # Safety
///
/// The function is safe to call, but using it requires extra care. The
/// reference count can be changed by other threads at any time including
/// potentially between calling this method and acting on the result.
pub fn reference_count(&self) -> u32 {
self.slot().ref_count.load(Ordering::Relaxed)
}
fn slot(&self) -> &MetaSlot {
// SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
// immutable reference to it is always safe.
unsafe { &*self.ptr }
}
}
impl<M: PageMeta> Clone for Page<M> {
fn clone(&self) -> Self {
// SAFETY: We have already held a reference to the page.
unsafe { self.slot().inc_ref_count() };
Self {
ptr: self.ptr,
_marker: PhantomData,
}
}
}
impl<M: PageMeta> Drop for Page<M> {
fn drop(&mut self) {
let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
if last_ref_cnt == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
core::sync::atomic::fence(Ordering::Acquire);
// SAFETY: this is the last reference and is about to be dropped.
unsafe {
meta::drop_last_in_place(self.ptr as *mut MetaSlot);
}
}
}
}
/// A page with a dynamically-known usage.
///
/// It can also be used when the user doesn't care about the usage of the page.
#[derive(Debug)]
pub struct DynPage {
ptr: *const MetaSlot,
}
unsafe impl Send for DynPage {}
unsafe impl Sync for DynPage {}
impl DynPage {
/// Forget the handle to the page.
///
/// This is the same as [`Page::into_raw`].
///
/// This will result in the page being leaked without calling the custom dropper.
///
/// A physical address to the page is returned in case the page needs to be
/// restored using [`Self::from_raw`] later.
pub(in crate::mm) fn into_raw(self) -> Paddr {
let paddr = self.paddr();
core::mem::forget(self);
paddr
}
/// Restore a forgotten page from a physical address.
///
/// # Safety
///
/// The safety concerns are the same as [`Page::from_raw`].
pub(in crate::mm) unsafe fn from_raw(paddr: Paddr) -> Self {
let vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
let ptr = vaddr as *const MetaSlot;
Self { ptr }
}
/// Get the metadata of this page.
pub fn meta(&self) -> &dyn Any {
let slot = self.slot();
// SAFETY: The page metadata is valid to be borrowed immutably, since it will never be
// borrowed mutably after initialization.
let vtable_ptr = unsafe { &*slot.vtable_ptr.get() };
// SAFETY: The page metadata is initialized and valid.
let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };
let meta_ptr: *const dyn PageMeta = core::ptr::from_raw_parts(self.ptr, vtable_ptr);
// SAFETY: `self.ptr` points to the metadata storage which is valid to be immutably
// borrowed under `vtable_ptr` because the vtable is correct, it lives under the given
// lifetime, and no one will mutably borrow the page metadata after initialization.
(unsafe { &*meta_ptr }) as &dyn Any
}
/// Get the physical address of the start of the page
pub fn paddr(&self) -> Paddr {
mapping::meta_to_page::<PagingConsts>(self.ptr as Vaddr)
}
/// Get the paging level of this page.
pub fn level(&self) -> PagingLevel {
1
}
/// Size of this page in bytes.
pub fn size(&self) -> usize {
PAGE_SIZE
}
fn slot(&self) -> &MetaSlot {
// SAFETY: `ptr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking an
// immutable reference to it is always safe.
unsafe { &*self.ptr }
}
}
impl<M: PageMeta> TryFrom<DynPage> for Page<M> {
type Error = DynPage;
/// Try converting a [`DynPage`] into the statically-typed [`Page`].
///
/// If the usage of the page is not the same as the expected usage, it will
/// return the dynamic page itself as is.
fn try_from(dyn_page: DynPage) -> Result<Self, Self::Error> {
if dyn_page.meta().is::<M>() {
let result = Page {
ptr: dyn_page.ptr,
_marker: PhantomData,
};
let _ = ManuallyDrop::new(dyn_page);
Ok(result)
} else {
Err(dyn_page)
}
}
}
impl<M: PageMeta> From<Page<M>> for DynPage {
fn from(page: Page<M>) -> Self {
let result = Self { ptr: page.ptr };
let _ = ManuallyDrop::new(page);
result
}
}
impl From<Frame> for DynPage {
fn from(frame: Frame) -> Self {
Page::<FrameMeta>::from(frame).into()
}
}
impl Clone for DynPage {
fn clone(&self) -> Self {
// SAFETY: We have already held a reference to the page.
unsafe { self.slot().inc_ref_count() };
Self { ptr: self.ptr }
}
}
impl Drop for DynPage {
fn drop(&mut self) {
let last_ref_cnt = self.slot().ref_count.fetch_sub(1, Ordering::Release);
debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);
if last_ref_cnt == 1 {
// A fence is needed here with the same reasons stated in the implementation of
// `Arc::drop`: <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.drop>.
core::sync::atomic::fence(Ordering::Acquire);
// SAFETY: this is the last reference and is about to be dropped.
unsafe {
meta::drop_last_in_place(self.ptr as *mut MetaSlot);
}
}
}
}
/// Increases the reference count of the page by one.
///
/// # Safety
///
/// The caller should ensure the following conditions:
/// 1. The physical address must represent a valid page;
/// 2. The caller must have already held a reference to the page.
pub(in crate::mm) unsafe fn inc_page_ref_count(paddr: Paddr) {
debug_assert!(paddr % PAGE_SIZE == 0);
debug_assert!(paddr < MAX_PADDR.load(Ordering::Relaxed) as Paddr);
let vaddr: Vaddr = mapping::page_to_meta::<PagingConsts>(paddr);
// SAFETY: `vaddr` points to a valid `MetaSlot` that will never be mutably borrowed, so taking
// an immutable reference to it is always safe.
let slot = unsafe { &*(vaddr as *const MetaSlot) };
// SAFETY: We have already held a reference to the page.
unsafe { slot.inc_ref_count() };
}
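In the same spirit, a short sketch of the page-handle lifecycle implemented by the removed module above, using only the calls shown in this diff (`alloc_single`, `clone`, and the `Page`/`DynPage` conversions). After this commit these read `Frame<M>` and `AnyFrame` under `mm::frame`.

use crate::mm::{frame::FrameMeta, page::{allocator, DynPage, Page}};

fn page_handle_sketch() {
    // A freshly allocated page starts with a reference count of 1.
    let page: Page<FrameMeta> = allocator::alloc_single(FrameMeta::default()).unwrap();

    // Cloning a handle bumps the count stored in the page's `MetaSlot`.
    let extra = page.clone();
    assert!(extra.reference_count() >= 2);

    // Type-erase the handle; `DynPage` (`AnyFrame` after the rename) points
    // to the same metadata slot.
    let erased: DynPage = extra.into();

    // Downcasting succeeds only when the metadata type matches; otherwise
    // the `DynPage` is handed back unchanged.
    let recovered: Page<FrameMeta> = Page::try_from(erased).expect("metadata type matches");

    // Dropping the last handle runs `meta::drop_last_in_place` on the slot.
    drop((page, recovered));
}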

View File

@ -16,7 +16,7 @@ use crate::{
cpu::num_cpus, cpu::num_cpus,
cpu_local_cell, cpu_local_cell,
mm::{ mm::{
nr_subpage_per_huge, paddr_to_vaddr, page::allocator::PAGE_ALLOCATOR, Paddr, PageProperty, frame::allocator::PAGE_ALLOCATOR, nr_subpage_per_huge, paddr_to_vaddr, Paddr, PageProperty,
PagingConstsTrait, Vaddr, PAGE_SIZE, PagingConstsTrait, Vaddr, PAGE_SIZE,
}, },
sync::SpinLock, sync::SpinLock,

View File

@ -76,7 +76,7 @@ use super::{
}; };
use crate::{ use crate::{
mm::{ mm::{
kspace::should_map_as_tracked, paddr_to_vaddr, page::DynPage, Paddr, PageProperty, Vaddr, frame::AnyFrame, kspace::should_map_as_tracked, paddr_to_vaddr, Paddr, PageProperty, Vaddr,
}, },
task::{disable_preempt, DisabledPreemptGuard}, task::{disable_preempt, DisabledPreemptGuard},
}; };
@ -89,7 +89,7 @@ pub enum PageTableItem {
}, },
Mapped { Mapped {
va: Vaddr, va: Vaddr,
page: DynPage, page: AnyFrame,
prop: PageProperty, prop: PageProperty,
}, },
#[allow(dead_code)] #[allow(dead_code)]
@ -231,7 +231,7 @@ where
len: page_size::<C>(level), len: page_size::<C>(level),
}); });
} }
Child::Page(page, prop) => { Child::Frame(page, prop) => {
return Ok(PageTableItem::Mapped { va, page, prop }); return Ok(PageTableItem::Mapped { va, page, prop });
} }
Child::Untracked(pa, plevel, prop) => { Child::Untracked(pa, plevel, prop) => {
@ -400,9 +400,9 @@ where
self.0.query() self.0.query()
} }
/// Maps the range starting from the current address to a [`DynPage`]. /// Maps the range starting from the current address to an [`AnyFrame`].
/// ///
/// It returns the previously mapped [`DynPage`] if that exists. /// It returns the previously mapped [`AnyFrame`] if that exists.
/// ///
/// # Panics /// # Panics
/// ///
@ -415,7 +415,7 @@ where
/// ///
/// The caller should ensure that the virtual range being mapped does /// The caller should ensure that the virtual range being mapped does
/// not affect kernel's memory safety. /// not affect kernel's memory safety.
pub unsafe fn map(&mut self, page: DynPage, prop: PageProperty) -> Option<DynPage> { pub unsafe fn map(&mut self, page: AnyFrame, prop: PageProperty) -> Option<AnyFrame> {
let end = self.0.va + page.size(); let end = self.0.va + page.size();
assert!(end <= self.0.barrier_va.end); assert!(end <= self.0.barrier_va.end);
@ -437,7 +437,7 @@ where
let _ = cur_entry.replace(Child::PageTable(pt.clone_raw())); let _ = cur_entry.replace(Child::PageTable(pt.clone_raw()));
self.0.push_level(pt); self.0.push_level(pt);
} }
Child::Page(_, _) => { Child::Frame(_, _) => {
panic!("Mapping a smaller page in an already mapped huge page"); panic!("Mapping a smaller page in an already mapped huge page");
} }
Child::Untracked(_, _, _) => { Child::Untracked(_, _, _) => {
@ -449,11 +449,11 @@ where
debug_assert_eq!(self.0.level, page.level()); debug_assert_eq!(self.0.level, page.level());
// Map the current page. // Map the current page.
let old = self.0.cur_entry().replace(Child::Page(page, prop)); let old = self.0.cur_entry().replace(Child::Frame(page, prop));
self.0.move_forward(); self.0.move_forward();
match old { match old {
Child::Page(old_page, _) => Some(old_page), Child::Frame(old_page, _) => Some(old_page),
Child::None => None, Child::None => None,
Child::PageTable(_) => { Child::PageTable(_) => {
todo!("Dropping page table nodes while mapping requires TLB flush") todo!("Dropping page table nodes while mapping requires TLB flush")
@ -520,7 +520,7 @@ where
let _ = cur_entry.replace(Child::PageTable(pt.clone_raw())); let _ = cur_entry.replace(Child::PageTable(pt.clone_raw()));
self.0.push_level(pt); self.0.push_level(pt);
} }
Child::Page(_, _) => { Child::Frame(_, _) => {
panic!("Mapping a smaller page in an already mapped huge page"); panic!("Mapping a smaller page in an already mapped huge page");
} }
Child::Untracked(_, _, _) => { Child::Untracked(_, _, _) => {
@ -614,7 +614,7 @@ where
Child::None => { Child::None => {
unreachable!("Already checked"); unreachable!("Already checked");
} }
Child::Page(_, _) => { Child::Frame(_, _) => {
panic!("Removing part of a huge page"); panic!("Removing part of a huge page");
} }
Child::Untracked(_, _, _) => { Child::Untracked(_, _, _) => {
@ -631,7 +631,7 @@ where
self.0.move_forward(); self.0.move_forward();
return match old { return match old {
Child::Page(page, prop) => PageTableItem::Mapped { Child::Frame(page, prop) => PageTableItem::Mapped {
va: self.0.va, va: self.0.va,
page, page,
prop, prop,
@ -796,7 +796,7 @@ where
Child::Untracked(_, _, _) => { Child::Untracked(_, _, _) => {
panic!("Copying untracked mappings"); panic!("Copying untracked mappings");
} }
Child::Page(page, mut prop) => { Child::Frame(page, mut prop) => {
let mapped_page_size = page.size(); let mapped_page_size = page.size();
// Do protection. // Do protection.

View File

@ -8,7 +8,7 @@ use super::{MapTrackingStatus, PageTableEntryTrait, RawPageTableNode};
use crate::{ use crate::{
arch::mm::{PageTableEntry, PagingConsts}, arch::mm::{PageTableEntry, PagingConsts},
mm::{ mm::{
page::{inc_page_ref_count, DynPage}, frame::{inc_page_ref_count, AnyFrame},
page_prop::PageProperty, page_prop::PageProperty,
Paddr, PagingConstsTrait, PagingLevel, Paddr, PagingConstsTrait, PagingLevel,
}, },
@ -27,7 +27,7 @@ pub(in crate::mm) enum Child<
[(); C::NR_LEVELS as usize]:, [(); C::NR_LEVELS as usize]:,
{ {
PageTable(RawPageTableNode<E, C>), PageTable(RawPageTableNode<E, C>),
Page(DynPage, PageProperty), Frame(AnyFrame, PageProperty),
/// Pages not tracked by handles. /// Pages not tracked by handles.
Untracked(Paddr, PagingLevel, PageProperty), Untracked(Paddr, PagingLevel, PageProperty),
None, None,
@ -53,7 +53,7 @@ where
) -> bool { ) -> bool {
match self { match self {
Child::PageTable(pt) => node_level == pt.level() + 1, Child::PageTable(pt) => node_level == pt.level() + 1,
Child::Page(p, _) => { Child::Frame(p, _) => {
node_level == p.level() && is_tracked == MapTrackingStatus::Tracked node_level == p.level() && is_tracked == MapTrackingStatus::Tracked
} }
Child::Untracked(_, level, _) => { Child::Untracked(_, level, _) => {
@ -78,7 +78,7 @@ where
let pt = ManuallyDrop::new(pt); let pt = ManuallyDrop::new(pt);
E::new_pt(pt.paddr()) E::new_pt(pt.paddr())
} }
Child::Page(page, prop) => { Child::Frame(page, prop) => {
let level = page.level(); let level = page.level();
E::new_page(page.into_raw(), level, prop) E::new_page(page.into_raw(), level, prop)
} }
@ -119,8 +119,8 @@ where
match is_tracked { match is_tracked {
MapTrackingStatus::Tracked => { MapTrackingStatus::Tracked => {
// SAFETY: The physical address points to a valid page. // SAFETY: The physical address points to a valid page.
let page = unsafe { DynPage::from_raw(paddr) }; let page = unsafe { AnyFrame::from_raw(paddr) };
Child::Page(page, pte.prop()) Child::Frame(page, pte.prop())
} }
MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()), MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"), MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),
@ -162,8 +162,8 @@ where
// the reference to the page. // the reference to the page.
unsafe { inc_page_ref_count(paddr) }; unsafe { inc_page_ref_count(paddr) };
// SAFETY: The physical address points to a valid page. // SAFETY: The physical address points to a valid page.
let page = unsafe { DynPage::from_raw(paddr) }; let page = unsafe { AnyFrame::from_raw(paddr) };
Child::Page(page, pte.prop()) Child::Frame(page, pte.prop())
} }
MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()), MapTrackingStatus::Untracked => Child::Untracked(paddr, level, pte.prop()),
MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"), MapTrackingStatus::NotApplicable => panic!("Invalid tracking status"),

View File

@ -40,9 +40,8 @@ use super::{nr_subpage_per_huge, PageTableEntryTrait};
use crate::{ use crate::{
arch::mm::{PageTableEntry, PagingConsts}, arch::mm::{PageTableEntry, PagingConsts},
mm::{ mm::{
paddr_to_vaddr, frame::{self, inc_page_ref_count, meta::FrameMeta, AnyFrame, Frame},
page::{self, inc_page_ref_count, meta::PageMeta, DynPage, Page}, paddr_to_vaddr, Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
Paddr, PagingConstsTrait, PagingLevel, PAGE_SIZE,
}, },
}; };
@ -79,7 +78,7 @@ where
/// Converts a raw handle to an accessible handle by acquiring the lock. /// Converts a raw handle to an accessible handle by acquiring the lock.
pub(super) fn lock(self) -> PageTableNode<E, C> { pub(super) fn lock(self) -> PageTableNode<E, C> {
let level = self.level; let level = self.level;
let page: Page<PageTablePageMeta<E, C>> = self.into(); let page: Frame<PageTablePageMeta<E, C>> = self.into();
// Acquire the lock. // Acquire the lock.
let meta = page.meta(); let meta = page.meta();
@ -187,7 +186,7 @@ where
} }
impl<E: PageTableEntryTrait, C: PagingConstsTrait> From<RawPageTableNode<E, C>> impl<E: PageTableEntryTrait, C: PagingConstsTrait> From<RawPageTableNode<E, C>>
for Page<PageTablePageMeta<E, C>> for Frame<PageTablePageMeta<E, C>>
where where
[(); C::NR_LEVELS as usize]:, [(); C::NR_LEVELS as usize]:,
{ {
@ -196,7 +195,7 @@ where
// SAFETY: The physical address in the raw handle is valid and we are // SAFETY: The physical address in the raw handle is valid and we are
// transferring the ownership to a new handle. No increment of the reference // transferring the ownership to a new handle. No increment of the reference
// count is needed. // count is needed.
unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(raw.paddr()) } unsafe { Frame::<PageTablePageMeta<E, C>>::from_raw(raw.paddr()) }
} }
} }
@ -207,7 +206,7 @@ where
fn drop(&mut self) { fn drop(&mut self) {
// SAFETY: The physical address in the raw handle is valid. The restored // SAFETY: The physical address in the raw handle is valid. The restored
// handle is dropped to decrement the reference count. // handle is dropped to decrement the reference count.
drop(unsafe { Page::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) }); drop(unsafe { Frame::<PageTablePageMeta<E, C>>::from_raw(self.paddr()) });
} }
} }
@ -225,7 +224,7 @@ pub(super) struct PageTableNode<
> where > where
[(); C::NR_LEVELS as usize]:, [(); C::NR_LEVELS as usize]:,
{ {
page: Page<PageTablePageMeta<E, C>>, page: Frame<PageTablePageMeta<E, C>>,
} }
impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C> impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageTableNode<E, C>
@ -261,7 +260,7 @@ where
/// extra unnecessary expensive operation. /// extra unnecessary expensive operation.
pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self { pub(super) fn alloc(level: PagingLevel, is_tracked: MapTrackingStatus) -> Self {
let meta = PageTablePageMeta::new_locked(level, is_tracked); let meta = PageTablePageMeta::new_locked(level, is_tracked);
let page = page::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap(); let page = frame::allocator::alloc_single::<PageTablePageMeta<E, C>>(meta).unwrap();
// Zero out the page table node. // Zero out the page table node.
let ptr = paddr_to_vaddr(page.paddr()) as *mut u8; let ptr = paddr_to_vaddr(page.paddr()) as *mut u8;
@ -407,7 +406,7 @@ where
// SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for // SAFETY: The layout of the `PageTablePageMeta` is ensured to be the same for
// all possible generic parameters. And the layout fits the requirements. // all possible generic parameters. And the layout fits the requirements.
unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> PageMeta for PageTablePageMeta<E, C> unsafe impl<E: PageTableEntryTrait, C: PagingConstsTrait> FrameMeta for PageTablePageMeta<E, C>
where where
[(); C::NR_LEVELS as usize]:, [(); C::NR_LEVELS as usize]:,
{ {
@ -439,11 +438,11 @@ where
if !pte.is_last(level) { if !pte.is_last(level) {
// SAFETY: The PTE points to a page table node. The ownership // SAFETY: The PTE points to a page table node. The ownership
// of the child is transferred to the child then dropped. // of the child is transferred to the child then dropped.
drop(unsafe { Page::<Self>::from_raw(paddr) }); drop(unsafe { Frame::<Self>::from_raw(paddr) });
} else if is_tracked == MapTrackingStatus::Tracked { } else if is_tracked == MapTrackingStatus::Tracked {
// SAFETY: The PTE points to a tracked page. The ownership // SAFETY: The PTE points to a tracked page. The ownership
// of the child is transferred to the child then dropped. // of the child is transferred to the child then dropped.
drop(unsafe { DynPage::from_raw(paddr) }); drop(unsafe { AnyFrame::from_raw(paddr) });
} }
} }
} }

View File

@ -5,9 +5,8 @@ use core::mem::ManuallyDrop;
use super::*; use super::*;
use crate::{ use crate::{
mm::{ mm::{
frame::FrameMeta, frame::{allocator, untyped::UntypedMeta},
kspace::LINEAR_MAPPING_BASE_VADDR, kspace::LINEAR_MAPPING_BASE_VADDR,
page::allocator,
page_prop::{CachePolicy, PageFlags}, page_prop::{CachePolicy, PageFlags},
MAX_USERSPACE_VADDR, MAX_USERSPACE_VADDR,
}, },
@ -32,7 +31,7 @@ fn test_tracked_map_unmap() {
let pt = PageTable::<UserMode>::empty(); let pt = PageTable::<UserMode>::empty();
let from = PAGE_SIZE..PAGE_SIZE * 2; let from = PAGE_SIZE..PAGE_SIZE * 2;
let page = allocator::alloc_single(FrameMeta::default()).unwrap(); let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
let start_paddr = page.paddr(); let start_paddr = page.paddr();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback); let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) }; unsafe { pt.cursor_mut(&from).unwrap().map(page.into(), prop) };
@ -88,7 +87,7 @@ fn test_user_copy_on_write() {
let pt = PageTable::<UserMode>::empty(); let pt = PageTable::<UserMode>::empty();
let from = PAGE_SIZE..PAGE_SIZE * 2; let from = PAGE_SIZE..PAGE_SIZE * 2;
let page = allocator::alloc_single(FrameMeta::default()).unwrap(); let page = allocator::alloc_single(UntypedMeta::default()).unwrap();
let start_paddr = page.paddr(); let start_paddr = page.paddr();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback); let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) }; unsafe { pt.cursor_mut(&from).unwrap().map(page.clone().into(), prop) };
@ -173,7 +172,7 @@ fn test_base_protect_query() {
let from_ppn = 1..1000; let from_ppn = 1..1000;
let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end; let from = PAGE_SIZE * from_ppn.start..PAGE_SIZE * from_ppn.end;
let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| FrameMeta::default()).unwrap(); let to = allocator::alloc_contiguous(999 * PAGE_SIZE, |_| UntypedMeta::default()).unwrap();
let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback); let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);
unsafe { unsafe {
let mut cursor = pt.cursor_mut(&from).unwrap(); let mut cursor = pt.cursor_mut(&from).unwrap();

View File

@ -2,7 +2,7 @@
//! APIs for memory statistics. //! APIs for memory statistics.
use crate::mm::page::allocator::PAGE_ALLOCATOR; use crate::mm::frame::allocator::PAGE_ALLOCATOR;
/// Total memory available for any usages in the system (in bytes). /// Total memory available for any usages in the system (in bytes).
/// ///

View File

@ -5,7 +5,7 @@
use alloc::vec::Vec; use alloc::vec::Vec;
use core::ops::Range; use core::ops::Range;
use super::{page::DynPage, Vaddr, PAGE_SIZE}; use super::{frame::AnyFrame, Vaddr, PAGE_SIZE};
use crate::{ use crate::{
cpu::{CpuSet, PinCurrentCpu}, cpu::{CpuSet, PinCurrentCpu},
cpu_local, cpu_local,
@ -77,7 +77,7 @@ impl<G: PinCurrentCpu> TlbFlusher<G> {
/// flushed. Otherwise if the page is recycled for other purposes, the user /// flushed. Otherwise if the page is recycled for other purposes, the user
/// space program can still access the page through the TLB entries. This /// space program can still access the page through the TLB entries. This
/// method is designed to be used in such cases. /// method is designed to be used in such cases.
pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: DynPage) { pub fn issue_tlb_flush_with(&self, op: TlbFlushOp, drop_after_flush: AnyFrame) {
self.issue_tlb_flush_(op, Some(drop_after_flush)); self.issue_tlb_flush_(op, Some(drop_after_flush));
} }
@ -91,7 +91,7 @@ impl<G: PinCurrentCpu> TlbFlusher<G> {
self.need_self_flush self.need_self_flush
} }
fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option<DynPage>) { fn issue_tlb_flush_(&self, op: TlbFlushOp, drop_after_flush: Option<AnyFrame>) {
let op = op.optimize_for_large_range(); let op = op.optimize_for_large_range();
// Fast path for single CPU cases. // Fast path for single CPU cases.
@ -156,7 +156,7 @@ impl TlbFlushOp {
// Lock ordering: lock FLUSH_OPS before PAGE_KEEPER. // Lock ordering: lock FLUSH_OPS before PAGE_KEEPER.
cpu_local! { cpu_local! {
static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new()); static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
static PAGE_KEEPER: SpinLock<Vec<DynPage>, LocalIrqDisabled> = SpinLock::new(Vec::new()); static PAGE_KEEPER: SpinLock<Vec<AnyFrame>, LocalIrqDisabled> = SpinLock::new(Vec::new());
} }
fn do_remote_flush() { fn do_remote_flush() {

View File

@ -22,7 +22,7 @@ use crate::{
kspace::KERNEL_PAGE_TABLE, kspace::KERNEL_PAGE_TABLE,
page_table::{self, PageTable, PageTableItem, UserMode}, page_table::{self, PageTable, PageTableItem, UserMode},
tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD}, tlb::{TlbFlushOp, TlbFlusher, FLUSH_ALL_RANGE_THRESHOLD},
Frame, PageProperty, VmReader, VmWriter, MAX_USERSPACE_VADDR, PageProperty, UntypedFrame, VmReader, VmWriter, MAX_USERSPACE_VADDR,
}, },
prelude::*, prelude::*,
sync::{PreemptDisabled, RwLock, RwLockReadGuard}, sync::{PreemptDisabled, RwLock, RwLockReadGuard},
@ -40,7 +40,7 @@ use crate::{
/// ///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To /// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical /// provide memory pages for a `VmSpace`, one can allocate and map physical
/// memory ([`Frame`]s) to the `VmSpace` using the cursor. /// memory ([`UntypedFrame`]s) to the `VmSpace` using the cursor.
/// ///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to /// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space. /// handle page faults generated from user space.
@ -323,7 +323,7 @@ impl CursorMut<'_, '_> {
/// Map a frame into the current slot. /// Map a frame into the current slot.
/// ///
/// This method will bring the cursor to the next slot after the modification. /// This method will bring the cursor to the next slot after the modification.
pub fn map(&mut self, frame: Frame, prop: PageProperty) { pub fn map(&mut self, frame: UntypedFrame, prop: PageProperty) {
let start_va = self.virt_addr(); let start_va = self.virt_addr();
// SAFETY: It is safe to map untyped memory into the userspace. // SAFETY: It is safe to map untyped memory into the userspace.
let old = unsafe { self.pt_cursor.map(frame.into(), prop) }; let old = unsafe { self.pt_cursor.map(frame.into(), prop) };
@ -475,7 +475,7 @@ pub enum VmItem {
/// The virtual address of the slot. /// The virtual address of the slot.
va: Vaddr, va: Vaddr,
/// The mapped frame. /// The mapped frame.
frame: Frame, frame: UntypedFrame,
/// The property of the slot. /// The property of the slot.
prop: PageProperty, prop: PageProperty,
}, },
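The hunk above is the public face of the rename: user-space mappings now take an `UntypedFrame` (formerly `Frame`). Below is a hedged sketch of the flow described by the doc comment, assuming a public `FrameAllocOptions::new(1).alloc_single()` constructor and a `VmSpace::cursor_mut(&range)` accessor with these rough shapes; neither appears in this hunk.

use ostd::mm::{
    CachePolicy, FrameAllocOptions, PageFlags, PageProperty, VmSpace, PAGE_SIZE,
};

fn map_one_frame(vm_space: &VmSpace) -> Result<(), ostd::Error> {
    // Allocate a single untyped frame (an `UntypedFrame` after this commit).
    let frame = FrameAllocOptions::new(1).alloc_single()?;
    let prop = PageProperty::new(PageFlags::RW, CachePolicy::Writeback);

    // Map it at an arbitrary page-aligned user address; `map` brings the
    // cursor to the next slot after the modification.
    let va = 0x10_0000..0x10_0000 + PAGE_SIZE;
    let mut cursor = vm_space.cursor_mut(&va)?;
    cursor.map(frame, prop);
    Ok(())
}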

View File

@ -1,10 +1,10 @@
// SPDX-License-Identifier: MPL-2.0 // SPDX-License-Identifier: MPL-2.0
use crate::{ use crate::{
impl_page_meta, impl_frame_meta_for,
mm::{ mm::{
frame::allocator,
kspace::kvirt_area::{KVirtArea, Tracked}, kspace::kvirt_area::{KVirtArea, Tracked},
page::allocator,
page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags}, page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
PAGE_SIZE, PAGE_SIZE,
}, },
@ -38,7 +38,7 @@ pub struct KernelStack {
#[derive(Debug, Default)] #[derive(Debug, Default)]
struct KernelStackMeta {} struct KernelStackMeta {}
impl_page_meta!(KernelStackMeta); impl_frame_meta_for!(KernelStackMeta);
impl KernelStack { impl KernelStack {
/// Generates a kernel stack with guard pages. /// Generates a kernel stack with guard pages.